1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_cmn.h"
61 #include <linux/firmware.h>
62 #include "bnx2x_fw_file_hdr.h"
64 #define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT (5*HZ)
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
86 static int multi_mode = 1;
87 module_param(multi_mode, int, 0);
88 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
91 static int num_queues;
92 module_param(num_queues, int, 0);
93 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 static int dropless_fc;
106 module_param(dropless_fc, int, 0);
107 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110 module_param(poll, int, 0);
111 MODULE_PARM_DESC(poll, " Use polling (for debug)");
113 static int mrrs = -1;
114 module_param(mrrs, int, 0);
115 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 module_param(debug, int, 0);
119 MODULE_PARM_DESC(debug, " Default debug msglevel");
121 static struct workqueue_struct *bnx2x_wq;
123 enum bnx2x_board_type {
129 /* indexed by board_type, above */
132 } board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" }
139 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
153 * locking is done by mcp
/*
 * bnx2x_reg_wr_ind - indirect register write through PCI config space.
 * Programs the GRC address window, writes the value, then restores the
 * window to PCICFG_VENDOR_ID_OFFSET so ordinary config-space accesses
 * are not misrouted afterwards.  Per the comment above, locking is done
 * by the MCP.
 */
155 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
/* restore the address window to a benign offset */
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
/*
 * bnx2x_reg_rd_ind - indirect register read through PCI config space.
 * Mirror of bnx2x_reg_wr_ind(): set the GRC window, read the data word,
 * restore the window.  NOTE(review): the local declaration of the read
 * value and the return statement are elided in this listing.
 */
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
/* restore the address window to a benign offset */
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
/*
 * Per-channel DMAE "command go" doorbell registers, indexed by DMAE
 * channel number (0..15).  bnx2x_post_dmae() writes 1 to the entry for
 * its channel to kick the engine.
 */
175 const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
182 /* copy command into DMAE command memory and set DMAE command go */
/*
 * bnx2x_post_dmae - copy @dmae word-by-word into the device's command
 * memory slot for channel @idx, then ring that channel's doorbell.
 * Caller is responsible for serialization and completion polling.
 */
183 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
/* each command slot is sizeof(struct dmae_command) bytes wide */
188 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
189 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
/* kick the engine: write 1 to this channel's "go" register */
195 REG_WR(bp, dmae_reg_go_c[idx], 1);
/*
 * bnx2x_write_dmae - DMA @len32 dwords from host memory at @dma_addr to
 * device GRC space at @dst_addr using the DMAE engine.  While the DMAE
 * is not yet ready (early init) it falls back to indirect register
 * writes.  Completion is detected by polling the wb_comp word in the
 * slowpath area; the whole operation is serialized by bp->dmae_mutex.
 */
198 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201 struct dmae_command dmae;
202 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* early-init fallback: DMAE engine not initialized yet */
205 if (!bp->dmae_ready) {
206 u32 *data = bnx2x_sp(bp, wb_data[0]);
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
209 " using indirect\n", dst_addr, len32);
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
214 memset(&dmae, 0, sizeof(struct dmae_command));
/*
 * Build the command: PCI source -> GRC destination, completion written
 * to PCI.  The two ENDIANITY variants below are presumably alternate
 * preprocessor-selected arms (the #if lines are elided in this listing).
 */
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
220 DMAE_CMD_ENDIANITY_B_DW_SWAP |
222 DMAE_CMD_ENDIANITY_DW_SWAP |
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226 dmae.src_addr_lo = U64_LO(dma_addr);
227 dmae.src_addr_hi = U64_HI(dma_addr);
/* GRC addresses are dword-indexed, hence the >> 2 */
228 dmae.dst_addr_lo = dst_addr >> 2;
229 dmae.dst_addr_hi = 0;
/* completion value is DMAed back into the slowpath wb_comp word */
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233 dmae.comp_val = DMAE_COMP_VAL;
235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
237 "dst_addr [%x:%08x (%08x)]\n"
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* serialize against other DMAE users */
246 mutex_lock(&bp->dmae_mutex);
250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
/* poll for completion (timeout/delay handling elided in this listing) */
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258 BNX2X_ERR("DMAE timeout!\n");
262 /* adjust delay for emulation/FPGA */
263 if (CHIP_REV_IS_SLOW(bp))
269 mutex_unlock(&bp->dmae_mutex);
/*
 * bnx2x_read_dmae - DMA @len32 dwords from device GRC space at
 * @src_addr into the slowpath wb_data buffer.  Mirror of
 * bnx2x_write_dmae(): falls back to indirect register reads while the
 * DMAE engine is not ready, polls wb_comp for completion, and is
 * serialized by bp->dmae_mutex.  Callers read the result out of
 * bp->slowpath->wb_data[].
 */
272 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
274 struct dmae_command dmae;
275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* early-init fallback: read dword-by-dword through PCI config window */
278 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]);
282 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
283 " using indirect\n", src_addr, len32);
284 for (i = 0; i < len32; i++)
285 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
289 memset(&dmae, 0, sizeof(struct dmae_command));
/*
 * GRC source -> PCI destination; ENDIANITY variants are presumably
 * preprocessor-selected (the #if lines are elided in this listing).
 */
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
297 DMAE_CMD_ENDIANITY_DW_SWAP |
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
/* GRC addresses are dword-indexed, hence the >> 2 */
301 dmae.src_addr_lo = src_addr >> 2;
302 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
318 mutex_lock(&bp->dmae_mutex);
/* clear the destination so stale data cannot be mistaken for a result */
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
/* poll for completion (timeout/delay handling elided in this listing) */
327 while (*wb_comp != DMAE_COMP_VAL) {
330 BNX2X_ERR("DMAE timeout!\n");
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
344 mutex_unlock(&bp->dmae_mutex);
/*
 * bnx2x_write_dmae_phys_len - write an arbitrarily long buffer via DMAE
 * by splitting it into chunks of at most DMAE_LEN32_WR_MAX dwords, then
 * issuing one final transfer for the remainder.  NOTE(review): the
 * second signature line and the per-iteration length decrement are
 * elided in this listing.
 */
347 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
/* full-size chunks first; offset advances in bytes (dwords * 4) */
353 while (len > dmae_wr_max) {
354 bnx2x_write_dmae(bp, phys_addr + offset,
355 addr + offset, dmae_wr_max);
356 offset += dmae_wr_max * 4;
/* trailing partial chunk */
360 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
363 /* used only for slowpath so not inlined */
/*
 * bnx2x_wb_wr - write a 64-bit wide-bus register as two dwords
 * (high word first) through the DMAE write helper.
 */
364 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
368 wb_write[0] = val_hi;
369 wb_write[1] = val_lo;
370 REG_WR_DMAE(bp, reg, wb_write, 2);
/*
 * bnx2x_wb_rd - read a 64-bit wide-bus register as two dwords via DMAE
 * and combine them (wb_data[0] is the high word) into one u64.
 */
374 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
378 REG_RD_DMAE(bp, reg, wb_data, 2);
380 return HILO_U64(wb_data[0], wb_data[1]);
/*
 * bnx2x_mc_assert - dump firmware assert lists from all four STORM
 * processors (X, T, C, U).  For each STORM it reads the assert-list
 * index, then walks the assert array printing every valid entry (an
 * entry whose first dword differs from the "invalid opcode" marker).
 * The four sections below are structurally identical, differing only in
 * the BAR/offset constants.  NOTE(review): the per-section loop-break
 * conditions and the function's return value are elided in this listing.
 */
384 static int bnx2x_mc_assert(struct bnx2x *bp)
388 u32 row0, row1, row2, row3;
/* XSTORM */
391 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392 XSTORM_ASSERT_LIST_INDEX_OFFSET);
394 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396 /* print the asserts */
397 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
/* each assert entry is four consecutive dwords */
399 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400 XSTORM_ASSERT_LIST_OFFSET(i));
401 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410 " 0x%08x 0x%08x 0x%08x\n",
411 i, row3, row2, row1, row0);
/* TSTORM */
419 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420 TSTORM_ASSERT_LIST_INDEX_OFFSET);
422 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424 /* print the asserts */
425 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428 TSTORM_ASSERT_LIST_OFFSET(i));
429 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438 " 0x%08x 0x%08x 0x%08x\n",
439 i, row3, row2, row1, row0);
/* CSTORM */
447 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448 CSTORM_ASSERT_LIST_INDEX_OFFSET);
450 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452 /* print the asserts */
453 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456 CSTORM_ASSERT_LIST_OFFSET(i));
457 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466 " 0x%08x 0x%08x 0x%08x\n",
467 i, row3, row2, row1, row0);
/* USTORM */
475 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476 USTORM_ASSERT_LIST_INDEX_OFFSET);
478 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480 /* print the asserts */
481 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484 USTORM_ASSERT_LIST_OFFSET(i));
485 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486 USTORM_ASSERT_LIST_OFFSET(i) + 4);
487 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i) + 8);
489 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 12);
492 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494 " 0x%08x 0x%08x 0x%08x\n",
495 i, row3, row2, row1, row0);
/*
 * bnx2x_fw_dump - print the MCP firmware's trace buffer to the kernel
 * log.  Reads the "mark" pointer stored just below shmem, then prints
 * the circular scratchpad in two passes: from mark to the end of the
 * region, then from the start of the region back up to mark, 8 words
 * (32 bytes) at a time.  Words are byte-swapped with htonl() so the
 * ASCII trace text comes out in readable order.
 */
505 static void bnx2x_fw_dump(struct bnx2x *bp)
/* no management firmware, nothing to dump */
513 BNX2X_ERR("NO MCP - can not dump\n");
/* the mark word lives at a fixed offset below the shmem base */
517 addr = bp->common.shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr);
/* translate mark into a scratchpad address, rounded up to a dword */
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520 pr_err("begin fw dump (mark 0x%x)\n", mark);
/* first half of the ring: mark .. end of region */
523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
524 for (word = 0; word < 8; word++)
525 data[word] = htonl(REG_RD(bp, offset + 4*word));
527 pr_cont("%s", (char *)data);
/* second half of the ring: start of region .. mark */
529 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 pr_err("end of fw dump\n");
/*
 * bnx2x_panic_dump - emit a full crash-state dump to the kernel log:
 * slowpath indices, per-queue Rx/Tx producer/consumer state, and a
 * window of ring entries around each consumer.  Statistics are disabled
 * first so the dump is not raced by the stats state machine.
 */
538 void bnx2x_panic_dump(struct bnx2x *bp)
543 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
546 BNX2X_ERR("begin crash dump -----------------\n");
/* slowpath/default status-block indices and SPQ producer */
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
/* per-queue Rx state */
557 for_each_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i];
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx),
570 fp->status_blk->u_status_block.status_block_index);
/* per-queue Tx state */
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
584 fp->status_blk->c_status_block.status_block_index,
585 fp->tx_db.data.prod);
/* Rx ring contents: a window around each ring's consumer index */
590 for_each_queue(bp, i) {
591 struct bnx2x_fastpath *fp = &bp->fp[i];
/* rx_bd ring: from 10 entries before the SB consumer to 503 after */
593 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
595 for (j = start; j != end; j = RX_BD(j + 1)) {
596 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
599 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
600 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
/* SGE ring between producer and last_max_sge */
603 start = RX_SGE(fp->rx_sge_prod);
604 end = RX_SGE(fp->last_max_sge);
605 for (j = start; j != end; j = RX_SGE(j + 1)) {
606 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
609 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
610 i, j, rx_sge[1], rx_sge[0], sw_page->page);
/* completion queue window around rx_comp_cons */
613 start = RCQ_BD(fp->rx_comp_cons - 10);
614 end = RCQ_BD(fp->rx_comp_cons + 503);
615 for (j = start; j != end; j = RCQ_BD(j + 1)) {
616 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
618 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
/* Tx ring contents: packet bookkeeping then the raw BDs */
624 for_each_queue(bp, i) {
625 struct bnx2x_fastpath *fp = &bp->fp[i];
627 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629 for (j = start; j != end; j = TX_BD(j + 1)) {
630 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
632 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633 i, j, sw_bd->skb, sw_bd->first_bd);
636 start = TX_BD(fp->tx_bd_cons - 10);
637 end = TX_BD(fp->tx_bd_cons + 254);
638 for (j = start; j != end; j = TX_BD(j + 1)) {
639 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
641 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
648 BNX2X_ERR("end crash dump -----------------\n");
/*
 * bnx2x_int_enable - program the Host Coalescing block to enable
 * interrupts in whichever mode the driver is using (MSI-X, MSI or
 * INTx), then configure attention leading/trailing edges on E1H.
 * NOTE(review): the if/else selecting between the three mode branches
 * below is elided in this listing.
 */
651 void bnx2x_int_enable(struct bnx2x *bp)
653 int port = BP_PORT(bp);
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655 u32 val = REG_RD(bp, addr);
656 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
/* MSI-X: multiple vectors, no single-ISR, no INTx line */
660 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661 HC_CONFIG_0_REG_INT_LINE_EN_0);
662 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* MSI: single ISR, message-signaled, no INTx line */
665 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* INTx: enable everything including the legacy interrupt line */
670 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
678 REG_WR(bp, addr, val);
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
686 REG_WR(bp, addr, val);
688 * Ensure that HC_CONFIG is written before leading/trailing edge config
693 if (CHIP_IS_E1H(bp)) {
694 /* init leading/trailing edge */
/* edge mask: attn bits plus this function's per-VN bit */
696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
698 /* enable nig and gpio3 attention */
703 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
707 /* Make sure that interrupts are indeed enabled from here on */
/*
 * bnx2x_int_disable - clear all interrupt-enable bits in this port's
 * HC config register and read it back to verify (and to flush posted
 * writes through the IGU).
 */
711 static void bnx2x_int_disable(struct bnx2x *bp)
713 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr);
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719 HC_CONFIG_0_REG_INT_LINE_EN_0 |
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
725 /* flush all outstanding writes */
728 REG_WR(bp, addr, val);
/* read-back check: the disable must have reached the hardware */
729 if (REG_RD(bp, addr) != val)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n")
/*
 * bnx2x_int_disable_sync - quiesce all interrupt activity: block the
 * driver's handlers via intr_sem, optionally disable the hardware
 * (@disable_hw; the conditional is elided in this listing), wait for
 * in-flight ISRs with synchronize_irq(), and drain the slowpath task.
 */
733 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
738 /* disable interrupt handling */
739 atomic_inc(&bp->intr_sem);
740 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
743 /* prevent the HW from sending interrupts */
744 bnx2x_int_disable(bp);
746 /* make sure all ISRs are done */
/* MSI-X: wait on the slowpath vector, then every queue vector */
748 synchronize_irq(bp->msix_table[0].vector);
753 for_each_queue(bp, i)
754 synchronize_irq(bp->msix_table[i + offset].vector);
/* MSI/INTx: single shared vector */
756 synchronize_irq(bp->pdev->irq);
758 /* make sure sp_task is not running */
759 cancel_delayed_work(&bp->sp_task);
760 flush_workqueue(bnx2x_wq);
766 * General service functions
769 /* Return true if succeeded to acquire the lock */
/*
 * bnx2x_trylock_hw_lock - single-shot attempt to take a hardware
 * resource lock.  Writes the resource bit to the set-register
 * (control_reg + 4) and reads the lock status back; the bit being set
 * in the read-back means this function now owns the lock.  Does not
 * retry (contrast with bnx2x_acquire_hw_lock()).
 */
770 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
773 u32 resource_bit = (1 << resource);
774 int func = BP_FUNC(bp);
775 u32 hw_lock_control_reg;
777 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
779 /* Validating that the resource is within range */
780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
/* per-function lock register; functions 6/7 use the second bank */
788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
790 hw_lock_control_reg =
791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
793 /* Try to acquire the lock */
794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795 lock_status = REG_RD(bp, hw_lock_control_reg);
796 if (lock_status & resource_bit)
799 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
805 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
/*
 * bnx2x_sp_event - handle a slowpath (ramrod) completion CQE.  The CQE
 * carries a connection id and a command; the handler dispatches on the
 * combination of command and current fastpath/global state, advancing
 * the relevant state machine (fp->state or bp->state).  The mb() calls
 * make the state transitions visible to bnx2x_wait_ramrod() pollers.
 */
808 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
809 union eth_rx_cqe *rr_cqe)
811 struct bnx2x *bp = fp->bp;
812 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
813 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
816 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
817 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type);
/* per-queue ramrods: advance this fastpath's state machine */
823 switch (command | fp->state) {
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
828 fp->state = BNX2X_FP_STATE_OPEN;
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
834 fp->state = BNX2X_FP_STATE_HALTED;
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* global ramrods: advance the device-level state machine */
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED;
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
/* CFC delete while OPEN: a CNIC (iSCSI/FCoE) connection teardown */
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 bp->set_mac_pending--;
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
889 mb(); /* force bnx2x_wait_ramrod() to see the change */
/*
 * bnx2x_interrupt - legacy INTx / MSI interrupt handler.  Acks the
 * interrupt to get the pending-status bitmap, then: schedules NAPI for
 * every fastpath queue whose status bit (0x2 << sb_id) is set, forwards
 * CNIC events to the registered cnic_ops handler, and queues the
 * slowpath task when bit 0 is set.  Returns IRQ_NONE for interrupts
 * that are not ours (shared-line case).
 */
892 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
894 struct bnx2x *bp = netdev_priv(dev_instance);
895 u16 status = bnx2x_ack_int(bp);
899 /* Return here if interrupt is shared and it's not for us */
900 if (unlikely(status == 0)) {
901 DP(NETIF_MSG_INTR, "not our interrupt!\n");
904 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
906 /* Return here if interrupt is disabled */
907 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
908 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
912 #ifdef BNX2X_STOP_ON_ERROR
913 if (unlikely(bp->panic))
/* fastpath queues: each one owns status bit (0x2 << sb_id) */
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918 struct bnx2x_fastpath *fp = &bp->fp[i];
920 mask = 0x2 << fp->sb_id;
922 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
926 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block.
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
/* CNIC (iSCSI/FCoE offload) interrupt, if a handler is registered */
935 mask = 0x2 << CNIC_SB_ID(bp);
936 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL;
940 c_ops = rcu_dereference(bp->cnic_ops);
942 c_ops->cnic_handler(bp->cnic_data, NULL);
/* bit 0: slowpath events, handled in process context */
949 if (unlikely(status & 0x1)) {
950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
957 if (unlikely(status))
958 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
964 /* end of fast path */
970 * General service functions
/*
 * bnx2x_acquire_hw_lock - take a hardware resource lock, retrying for
 * up to ~5 seconds (1000 attempts, 5 ms apart per the comment below).
 * Validates the resource number, checks the lock is not already held,
 * then repeatedly writes the resource bit to the set-register until the
 * read-back shows ownership.  NOTE(review): the return statements and
 * the inter-attempt delay are elided in this listing.
 */
973 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
976 u32 resource_bit = (1 << resource);
977 int func = BP_FUNC(bp);
978 u32 hw_lock_control_reg;
981 /* Validating that the resource is within range */
982 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
984 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985 resource, HW_LOCK_MAX_RESOURCE_VALUE);
/* per-function lock register; functions 6/7 use the second bank */
990 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
992 hw_lock_control_reg =
993 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
996 /* Validating that the resource is not already taken */
997 lock_status = REG_RD(bp, hw_lock_control_reg);
998 if (lock_status & resource_bit) {
999 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1000 lock_status, resource_bit);
1004 /* Try for 5 second every 5ms */
1005 for (cnt = 0; cnt < 1000; cnt++) {
1006 /* Try to acquire the lock */
1007 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008 lock_status = REG_RD(bp, hw_lock_control_reg);
1009 if (lock_status & resource_bit)
1014 DP(NETIF_MSG_HW, "Timeout\n");
/*
 * bnx2x_release_hw_lock - release a hardware resource lock previously
 * taken with bnx2x_acquire_hw_lock()/bnx2x_trylock_hw_lock().  Writing
 * the resource bit to the base control register clears ownership.
 * Complains (and presumably errors out — return elided in this listing)
 * if the lock was not actually held.
 */
1018 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1021 u32 resource_bit = (1 << resource);
1022 int func = BP_FUNC(bp);
1023 u32 hw_lock_control_reg;
1025 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1027 /* Validating that the resource is within range */
1028 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1030 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031 resource, HW_LOCK_MAX_RESOURCE_VALUE);
/* per-function lock register; functions 6/7 use the second bank */
1036 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1038 hw_lock_control_reg =
1039 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1042 /* Validating that the resource is currently taken */
1043 lock_status = REG_RD(bp, hw_lock_control_reg);
1044 if (!(lock_status & resource_bit)) {
1045 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1046 lock_status, resource_bit);
/* write to the base register releases the lock */
1050 REG_WR(bp, hw_lock_control_reg, resource_bit);
/*
 * bnx2x_get_gpio - read the current value of one GPIO pin.  The pin
 * number is shifted by the port when the NIG port-swap strap is active.
 * NOTE(review): the value-assignment lines of the final if/else and the
 * return statement are elided in this listing.
 */
1055 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1057 /* The GPIO should be swapped if swap register is set and active */
1058 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060 int gpio_shift = gpio_num +
1061 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062 u32 gpio_mask = (1 << gpio_shift);
1066 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1071 /* read GPIO value */
1072 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1074 /* get the requested pin value */
1075 if ((gpio_reg & gpio_mask) == gpio_mask)
1080 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
/*
 * bnx2x_set_gpio - drive one GPIO pin low, high, or float it (input
 * hi-Z) according to @mode.  Handles the port-swap strap, and holds the
 * GPIO hardware lock around the read-modify-write of MISC_REG_GPIO.
 */
1085 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1087 /* The GPIO should be swapped if swap register is set and active */
1088 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1089 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1090 int gpio_shift = gpio_num +
1091 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092 u32 gpio_mask = (1 << gpio_shift);
1095 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
/* serialize GPIO register access across functions */
1100 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1101 /* read GPIO and mask except the float bits */
1102 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1105 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107 gpio_num, gpio_shift);
1108 /* clear FLOAT and set CLR */
1109 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1113 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115 gpio_num, gpio_shift);
1116 /* clear FLOAT and set SET */
1117 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1121 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1122 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123 gpio_num, gpio_shift);
/* set FLOAT: tri-state the pin */
1125 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1132 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1133 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/*
 * bnx2x_set_gpio_int - assert or de-assert the interrupt output of one
 * GPIO pin via MISC_REG_GPIO_INT (SET/CLR bit pairs).  Same port-swap
 * and hardware-lock discipline as bnx2x_set_gpio().
 */
1138 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1140 /* The GPIO should be swapped if swap register is set and active */
1141 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143 int gpio_shift = gpio_num +
1144 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145 u32 gpio_mask = (1 << gpio_shift);
1148 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
/* serialize GPIO register access across functions */
1153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1155 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160 "output low\n", gpio_num, gpio_shift);
1161 /* clear SET and set CLR */
1162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1166 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168 "output high\n", gpio_num, gpio_shift);
1169 /* clear CLR and set SET */
1170 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1178 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/*
 * bnx2x_set_spio - drive one shared (chip-level, not per-port) SPIO pin
 * low, high, or float it, under the SPIO hardware lock.  Only pins 4..7
 * are valid here.  Structure parallels bnx2x_set_gpio().
 */
1184 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1186 u32 spio_mask = (1 << spio_num);
1189 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190 (spio_num > MISC_REGISTERS_SPIO_7)) {
1191 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
/* serialize SPIO register access across functions */
1195 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1196 /* read SPIO and mask except the float bits */
1197 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1200 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1201 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202 /* clear FLOAT and set CLR */
1203 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1207 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1208 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209 /* clear FLOAT and set SET */
1210 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1214 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
/* set FLOAT: tri-state the pin */
1217 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1224 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/*
 * bnx2x_calc_fc_adv - translate the negotiated IEEE pause advertisement
 * (link_vars.ieee_fc) into the ethtool-style ADVERTISED_* pause bits in
 * bp->port.advertising.  NOTE(review): the second operand of each
 * two-line mask expression (presumably ADVERTISED_Pause) and the break
 * statements are elided in this listing.
 */
1230 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1232 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1240 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1245 bp->port.advertising |= ADVERTISED_Asym_Pause;
/* default: advertise no pause */
1249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
/*
 * bnx2x_initial_phy_init - bring up the link for the first time.
 * Requires bootcode (MCP) to be present; chooses TX-only flow control
 * for jumbo MTUs (> 5000), runs the PHY init under the PHY lock
 * (optionally in loopback for LOAD_DIAG), then recomputes the pause
 * advertisement and, on slow emulation/FPGA chips with link already up,
 * kicks the stats machine and reports link.  NOTE(review): the return
 * statements are elided in this listing.
 */
1256 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1258 if (!BP_NOMCP(bp)) {
1261 /* Initialize link parameters structure variables */
1262 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */
1264 if (bp->dev->mtu > 5000)
1265 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1267 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1269 bnx2x_acquire_phy_lock(bp);
/* diagnostics load runs the PHY in XGXS loopback */
1271 if (load_mode == LOAD_DIAG)
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1276 bnx2x_release_phy_lock(bp);
1278 bnx2x_calc_fc_adv(bp);
1280 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1282 bnx2x_link_report(bp);
1287 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/* Re-establish the link: reset the PHY, re-init it under the PHY lock,
 * then recompute the advertised flow-control bits. Needs bootcode.
 */
1291 void bnx2x_link_set(struct bnx2x *bp)
1293 if (!BP_NOMCP(bp)) {
1294 bnx2x_acquire_phy_lock(bp);
1295 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1296 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1297 bnx2x_release_phy_lock(bp);
1299 bnx2x_calc_fc_adv(bp);
1301 BNX2X_ERR("Bootcode is missing - can not set link\n");
/* Take the link down (PHY reset) under the PHY lock. Needs bootcode. */
1304 static void bnx2x__link_reset(struct bnx2x *bp)
1306 if (!BP_NOMCP(bp)) {
1307 bnx2x_acquire_phy_lock(bp);
1308 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1309 bnx2x_release_phy_lock(bp);
1311 BNX2X_ERR("Bootcode is missing - can not reset link\n");
/* Run the link self-test under the PHY lock; returns the test result
 * from bnx2x_test_link(). Needs bootcode.
 */
1314 u8 bnx2x_link_test(struct bnx2x *bp)
1318 if (!BP_NOMCP(bp)) {
1319 bnx2x_acquire_phy_lock(bp);
1320 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1321 bnx2x_release_phy_lock(bp);
1323 BNX2X_ERR("Bootcode is missing - can not test link\n");
/* Initialize the per-port rate-shaping and fairness contexts (bp->cmng)
 * from the current line speed. r_param is the line rate in bytes/usec
 * (Mbps / 8); SDM timer ticks are 4 usec each.
 */
1328 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1330 u32 r_param = bp->link_vars.line_speed / 8;
1331 u32 fair_periodic_timeout_usec;
1334 memset(&(bp->cmng.rs_vars), 0,
1335 sizeof(struct rate_shaping_vars_per_port));
1336 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1338 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1339 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1341 /* this is the threshold below which no timer arming will occur
1342 1.25 coefficient is for the threshold to be a little bigger
1343 than the real time, to compensate for timer inaccuracy */
1344 bp->cmng.rs_vars.rs_threshold =
1345 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1347 /* resolution of fairness timer */
1348 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1349 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1350 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1352 /* this is the threshold below which we won't arm the timer anymore */
1353 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1355 /* we multiply by 1e3/8 to get bytes/msec.
1356 We don't want the credits to pass a credit
1357 of the t_fair*FAIR_MEM (algorithm resolution) */
1358 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1359 /* since each tick is 4 usec */
1360 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1363 /* Calculates the sum of vn_min_rates.
1364 It's needed for further normalizing of the min_rates.
1366 sum of vn_min_rates.
1368 0 - if all the min_rates are 0.
1369 In the latter case the fairness algorithm should be deactivated.
1370 If not all min_rates are zero then those that are zeroes will be set to 1.
/* Sum the configured minimum rates (min BW * 100) of all non-hidden VNs on
 * this port into bp->vn_weight_sum; zero min-rates are bumped to
 * DEF_MIN_RATE. If all are zero, per-port fairness is disabled in
 * bp->cmng.flags, otherwise it is enabled.
 */
1372 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1375 int port = BP_PORT(bp);
1378 bp->vn_weight_sum = 0;
1379 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
/* E1H function numbering: two ports, functions interleave as 2*vn+port */
1380 int func = 2*vn + port;
1381 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1382 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1383 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1385 /* Skip hidden vns */
1386 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1389 /* If min rate is zero - set it to 1 */
1391 vn_min_rate = DEF_MIN_RATE;
1395 bp->vn_weight_sum += vn_min_rate;
1398 /* ... only if all min rates are zeros - disable fairness */
1400 bp->cmng.flags.cmng_enables &=
1401 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1402 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1403 " fairness will be disabled\n");
1405 bp->cmng.flags.cmng_enables |=
1406 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
/* Program the per-VN rate-shaping and fairness variables for @func into
 * XSTORM internal memory, derived from the shared-memory MF config
 * (min/max BW fields, scaled by 100) and the precomputed
 * bp->vn_weight_sum / bp->cmng.fair_vars.
 */
1409 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1411 struct rate_shaping_vars_per_vn m_rs_vn;
1412 struct fairness_vars_per_vn m_fair_vn;
1413 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1414 u16 vn_min_rate, vn_max_rate;
1417 /* If function is hidden - set min and max to zeroes */
1418 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1423 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1424 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1425 /* If min rate is zero - set it to 1 */
1427 vn_min_rate = DEF_MIN_RATE;
1428 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1429 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1432 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1433 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1435 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1436 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1438 /* global vn counter - maximal Mbps for this vn */
1439 m_rs_vn.vn_counter.rate = vn_max_rate;
1441 /* quota - number of bytes transmitted in this period */
1442 m_rs_vn.vn_counter.quota =
1443 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1445 if (bp->vn_weight_sum) {
1446 /* credit for each period of the fairness algorithm:
1447 number of bytes in T_FAIR (the vn share the port rate).
1448 vn_weight_sum should not be larger than 10000, thus
1449 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1451 m_fair_vn.vn_credit_delta =
1452 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1453 (8 * bp->vn_weight_sum))),
1454 (bp->cmng.fair_vars.fair_threshold * 2));
1455 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1456 m_fair_vn.vn_credit_delta);
1459 /* Store it to internal memory (copied word by word) */
1460 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1461 REG_WR(bp, BAR_XSTRORM_INTMEM +
1462 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1463 ((u32 *)(&m_rs_vn))[i]);
1465 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1466 REG_WR(bp, BAR_XSTRORM_INTMEM +
1467 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1468 ((u32 *)(&m_fair_vn))[i]);
1472 /* This function is called upon link interrupt */
/* Re-reads link state, programs dropless flow control and BMAC stats reset
 * on link-up, reports the link only when its status actually changed, and
 * (PMF path, below) notifies other drivers on the port and re-initializes
 * the rate-shaping/fairness contexts in XSTORM internal memory.
 */
1473 static void bnx2x_link_attn(struct bnx2x *bp)
1475 u32 prev_link_status = bp->link_vars.link_status;
1476 /* Make sure that we are synced with the current statistics */
1477 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1479 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1481 if (bp->link_vars.link_up) {
1483 /* dropless flow control */
1484 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1485 int port = BP_PORT(bp);
1486 u32 pause_enabled = 0;
1488 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
/* Tell USTORM whether TX pause is active on this port */
1491 REG_WR(bp, BAR_USTRORM_INTMEM +
1492 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1496 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1497 struct host_port_stats *pstats;
1499 pstats = bnx2x_sp(bp, port_stats);
1500 /* reset old bmac stats */
1501 memset(&(pstats->mac_stx[0]), 0,
1502 sizeof(struct mac_stx));
1504 if (bp->state == BNX2X_STATE_OPEN)
1505 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1508 /* indicate link status only if link status actually changed */
1509 if (prev_link_status != bp->link_vars.link_status)
1510 bnx2x_link_report(bp);
1513 int port = BP_PORT(bp);
1517 /* Set the attention towards other drivers on the same port */
1518 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
/* Do not signal ourselves */
1519 if (vn == BP_E1HVN(bp))
1522 func = ((vn << 1) | port);
1523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1524 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1527 if (bp->link_vars.link_up) {
1530 /* Init rate shaping and fairness contexts */
1531 bnx2x_init_port_minmax(bp);
1533 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1534 bnx2x_init_vn_minmax(bp, 2*vn + port);
1536 /* Store it to internal memory */
1538 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1539 REG_WR(bp, BAR_XSTRORM_INTMEM +
1540 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1541 ((u32 *)(&bp->cmng))[i]);
/* Refresh cached link status from the link code and kick the statistics
 * state machine accordingly; no-op unless the device is open and the
 * function is not MF-disabled. Always (re)reports link state at the end.
 */
1546 void bnx2x__link_status_update(struct bnx2x *bp)
1548 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1551 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1553 if (bp->link_vars.link_up)
1554 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1556 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1558 bnx2x_calc_vn_weight_sum(bp);
1560 /* indicate link status */
1561 bnx2x_link_report(bp);
/* Take over the Port Management Function (PMF) role for this port:
 * enable NIG attention routing for our VN in the HC edge registers and
 * notify the statistics state machine.
 */
1564 static void bnx2x_pmf_update(struct bnx2x *bp)
1566 int port = BP_PORT(bp);
1570 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1572 /* enable nig attention */
1573 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1574 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1575 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1577 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1585 * General service functions
1588 /* send the MCP a request, block until there is a reply */
/* Post @command to the MCP firmware mailbox (tagged with an incrementing
 * sequence number) and poll for the matching reply, serialized by
 * fw_mb_mutex. Returns the FW reply code masked by FW_MSG_CODE_MASK, or
 * logs an error on timeout (up to ~5 s of polling).
 */
1589 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1591 int func = BP_FUNC(bp);
1592 u32 seq = ++bp->fw_seq;
/* Poll slower on emulation/FPGA chips */
1595 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1597 mutex_lock(&bp->fw_mb_mutex);
1598 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1599 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1602 /* let the FW do its magic ... */
1605 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1607 /* Give the FW up to 5 second (500*10ms) */
1608 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1610 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1611 cnt*delay, rc, seq);
1613 /* is this a reply to our command? */
1614 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1615 rc &= FW_MSG_CODE_MASK;
1618 BNX2X_ERR("FW failed to respond!\n");
1622 mutex_unlock(&bp->fw_mb_mutex);
/* MF/DCC disable path: stop TX queues, shut the NIG LLH function enable
 * bit for this port, and drop the carrier.
 */
1627 static void bnx2x_e1h_disable(struct bnx2x *bp)
1629 int port = BP_PORT(bp);
1631 netif_tx_disable(bp->dev);
1633 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1635 netif_carrier_off(bp->dev);
/* MF/DCC enable path: re-enable the NIG LLH function and wake the TX
 * queues; carrier is restored by the normal link-state path.
 */
1638 static void bnx2x_e1h_enable(struct bnx2x *bp)
1640 int port = BP_PORT(bp);
1642 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1644 /* Tx queue should be only reenabled */
1645 netif_tx_wake_all_queues(bp->dev);
1648 * Should not call netif_carrier_on since it will be called if the link
1649 * is up when checking for link state
/* Recompute and reprogram the port min/max (rate-shaping + fairness)
 * contexts after a bandwidth-allocation change, signal the other drivers
 * on the port via the link-sync general attention, and copy the updated
 * cmng structure into XSTORM internal memory.
 */
1653 static void bnx2x_update_min_max(struct bnx2x *bp)
1655 int port = BP_PORT(bp);
1658 /* Init rate shaping and fairness contexts */
1659 bnx2x_init_port_minmax(bp);
1661 bnx2x_calc_vn_weight_sum(bp);
1663 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664 bnx2x_init_vn_minmax(bp, 2*vn + port);
1669 /* Set the attention towards other drivers on the same port */
1670 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
/* Skip our own VN */
1671 if (vn == BP_E1HVN(bp))
1674 func = ((vn << 1) | port);
1675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1679 /* Store it to internal memory */
1680 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681 REG_WR(bp, BAR_XSTRORM_INTMEM +
1682 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683 ((u32 *)(&bp->cmng))[i]);
/* Handle a DCC (Dell Client Configuration) event from the MCP: function
 * enable/disable and bandwidth reallocation. Each handled event bit is
 * cleared from @dcc_event; the final MCP ack reports failure if any
 * unhandled bits remain.
 */
1687 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1689 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1691 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1694 * This is the only place besides the function initialization
1695 * where the bp->flags can change so it is done without any
1698 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1700 bp->flags |= MF_FUNC_DIS;
1702 bnx2x_e1h_disable(bp);
1704 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1705 bp->flags &= ~MF_FUNC_DIS;
1707 bnx2x_e1h_enable(bp);
1709 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1711 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1713 bnx2x_update_min_max(bp);
1714 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1717 /* Report results to MCP: failure if any event bit was not handled */
1719 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1721 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1724 /* must be called under the spq lock */
/* Return the current slow-path queue producer BD and advance the
 * producer, wrapping to the start of the ring at the last BD.
 */
1725 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1727 struct eth_spe *next_spe = bp->spq_prod_bd;
1729 if (bp->spq_prod_bd == bp->spq_last_bd) {
1730 bp->spq_prod_bd = bp->spq;
1731 bp->spq_prod_idx = 0;
1732 DP(NETIF_MSG_TIMER, "end of spq\n");
1740 /* must be called under the spq lock */
/* Publish the new SPQ producer index to XSTORM internal memory. */
1741 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1743 int func = BP_FUNC(bp);
1745 /* Make sure that BD data is updated before writing the producer */
1748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1753 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path ramrod: takes the spq lock, fails if the ring is full,
 * fills the next SPE header/data and publishes the new producer.
 * Returns non-zero on failure (ring full / panic).
 */
1754 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1755 u32 data_hi, u32 data_lo, int common)
1757 struct eth_spe *spe;
1759 #ifdef BNX2X_STOP_ON_ERROR
1760 if (unlikely(bp->panic))
1764 spin_lock_bh(&bp->spq_lock);
1766 if (!bp->spq_left) {
1767 BNX2X_ERR("BUG! SPQ ring full!\n");
1768 spin_unlock_bh(&bp->spq_lock);
1773 spe = bnx2x_sp_get_next(bp);
1775 /* CID needs port number to be encoded in it */
1776 spe->hdr.conn_and_cmd_data =
1777 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1779 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
/* Common ramrods are flagged in the SPE type field */
1782 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1784 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1791 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792 (u32)(U64_LO(bp->spq_mapping) +
1793 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1796 bnx2x_sp_prod_update(bp);
1797 spin_unlock_bh(&bp->spq_lock);
1801 /* acquire split MCP access lock register */
/* Spin (up to 1000 iterations) on the GRC lock register at
 * GRCBASE_MCP + 0x9c until bit 31 indicates the lock was granted;
 * logs an error and fails on timeout.
 */
1802 static int bnx2x_acquire_alr(struct bnx2x *bp)
1808 for (j = 0; j < 1000; j++) {
1810 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812 if (val & (1L << 31))
1817 if (!(val & (1L << 31))) {
1818 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1825 /* release split MCP access lock register */
/* Clearing the register releases the lock taken by bnx2x_acquire_alr(). */
1826 static void bnx2x_release_alr(struct bnx2x *bp)
1828 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
/* Compare the default status block indices written by the chip against
 * the driver's cached copies; refresh the cached values for any that
 * changed. Returns a bitmask of which storm/attention indices advanced
 * (assembled from lines not visible in this view).
 */
1831 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1833 struct host_def_status_block *def_sb = bp->def_status_blk;
1836 barrier(); /* status block is written to by the chip */
1837 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1841 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1845 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1849 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1853 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1861 * slow path service functions
/* Handle newly-asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG/link, SW timer, GPIOs, general attentions
 * 1-6), ack them toward the HC, and finally restore the NIG interrupt
 * mask if link handling temporarily cleared it.
 */
1864 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1866 int port = BP_PORT(bp);
1867 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868 COMMAND_REG_ATTN_BITS_SET);
1869 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1871 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872 NIG_REG_MASK_INTERRUPT_PORT0;
/* An already-asserted bit re-asserting indicates an IGU problem */
1876 if (bp->attn_state & asserted)
1877 BNX2X_ERR("IGU ERROR\n");
1879 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880 aeu_mask = REG_RD(bp, aeu_addr);
1882 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1883 aeu_mask, asserted);
1884 aeu_mask &= ~(asserted & 0x3ff);
1885 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1887 REG_WR(bp, aeu_addr, aeu_mask);
1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1890 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1891 bp->attn_state |= asserted;
1892 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1894 if (asserted & ATTN_HARD_WIRED_MASK) {
1895 if (asserted & ATTN_NIG_FOR_FUNC) {
1897 bnx2x_acquire_phy_lock(bp);
1899 /* save nig interrupt mask */
1900 nig_mask = REG_RD(bp, nig_int_mask_addr);
1901 REG_WR(bp, nig_int_mask_addr, 0);
1903 bnx2x_link_attn(bp);
1905 /* handle unicore attn? */
1907 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1910 if (asserted & GPIO_2_FUNC)
1911 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1913 if (asserted & GPIO_3_FUNC)
1914 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1916 if (asserted & GPIO_4_FUNC)
1917 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
/* Clear each general attention register after logging it */
1920 if (asserted & ATTN_GENERAL_ATTN_1) {
1921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1924 if (asserted & ATTN_GENERAL_ATTN_2) {
1925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1928 if (asserted & ATTN_GENERAL_ATTN_3) {
1929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1933 if (asserted & ATTN_GENERAL_ATTN_4) {
1934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1937 if (asserted & ATTN_GENERAL_ATTN_5) {
1938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1941 if (asserted & ATTN_GENERAL_ATTN_6) {
1942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1947 } /* if hardwired */
1949 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1951 REG_WR(bp, hc_addr, asserted);
1953 /* now set back the mask */
1954 if (asserted & ATTN_NIG_FOR_FUNC) {
1955 REG_WR(bp, nig_int_mask_addr, nig_mask);
1956 bnx2x_release_phy_lock(bp);
/* Record a fan failure: mark the external PHY type as FAILURE in shared
 * memory (so other agents see it) and log a shutdown warning to the user.
 */
1960 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1962 int port = BP_PORT(bp);
1964 /* mark the failure */
1967 dev_info.port_hw_config[port].external_phy_config);
1969 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1970 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1971 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1974 /* log the failure */
1975 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1976 " the driver to shutdown the card to prevent permanent"
1977 " damage. Please contact OEM Support for assistance\n");
/* Service group-0 deasserted attentions: SPIO5 (fan failure - powers down
 * the PHY via GPIOs per PHY type, then reports the failure), the GPIO3
 * module-detect interrupt, and fatal HW-block attentions in set 0.
 */
1980 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1982 int port = BP_PORT(bp);
1984 u32 val, swap_val, swap_override;
1986 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1987 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1989 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
/* Mask SPIO5 in the AEU enable register so it cannot re-fire */
1991 val = REG_RD(bp, reg_offset);
1992 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1993 REG_WR(bp, reg_offset, val);
1995 BNX2X_ERR("SPIO5 hw attention\n");
1997 /* Fan failure attention */
1998 switch (bp->link_params.phy[EXT_PHY1].type) {
1999 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2000 /* Low power mode is controlled by GPIO 2 */
2001 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2002 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2003 /* The PHY reset is controlled by GPIO 1 */
2004 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2005 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2008 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2009 /* The PHY reset is controlled by GPIO 1 */
2010 /* fake the port number to cancel the swap done in
2012 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2013 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2014 port = (swap_val && swap_override) ^ 1;
2015 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2016 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2022 bnx2x_fan_failure(bp);
2025 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2026 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2027 bnx2x_acquire_phy_lock(bp);
2028 bnx2x_handle_module_detect_int(&bp->link_params);
2029 bnx2x_release_phy_lock(bp);
2032 if (attn & HW_INTERRUT_ASSERT_SET_0) {
/* Mask the fatal block attentions and report them */
2034 val = REG_RD(bp, reg_offset);
2035 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2036 REG_WR(bp, reg_offset, val);
2038 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2039 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
/* Service group-1 deasserted attentions: doorbell-queue (DORQ) HW
 * interrupt (read-and-clear its status) and fatal set-1 block attentions
 * (masked in the AEU, then reported).
 */
2044 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2048 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2050 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2051 BNX2X_ERR("DB hw attention 0x%x\n", val);
2052 /* DORQ discard attention */
2054 BNX2X_ERR("FATAL error from DORQ\n");
2057 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2059 int port = BP_PORT(bp);
2062 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2063 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2065 val = REG_RD(bp, reg_offset);
2066 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2067 REG_WR(bp, reg_offset, val);
2069 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2070 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
/* Service group-2 deasserted attentions: CFC and PXP HW interrupts
 * (read-and-clear their status registers) and fatal set-2 block
 * attentions (masked in the AEU, then reported).
 */
2075 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2079 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2081 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2082 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2083 /* CFC error attention */
2085 BNX2X_ERR("FATAL error from CFC\n");
2088 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2090 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2091 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2092 /* RQ_USDMDP_FIFO_OVERFLOW */
2094 BNX2X_ERR("FATAL error from PXP\n");
2097 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2099 int port = BP_PORT(bp);
2102 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2103 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2105 val = REG_RD(bp, reg_offset);
2106 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2107 REG_WR(bp, reg_offset, val);
2109 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2110 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
/* Service group-3 deasserted attentions: general attentions (PMF link
 * sync -> DCC events / link-status refresh / PMF takeover; MC and MCP
 * asserts) and latched attentions (GRC timeout / reserved), which are
 * cleared via MISC_REG_AEU_CLR_LATCH_SIGNAL.
 */
2115 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2119 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2121 if (attn & BNX2X_PMF_LINK_ASSERT) {
2122 int func = BP_FUNC(bp);
/* Clear our link-sync general attention, then re-read MF config
 * and the drv_status word to see what the MCP wants */
2124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2125 bp->mf_config = SHMEM_RD(bp,
2126 mf_cfg.func_mf_config[func].config);
2127 val = SHMEM_RD(bp, func_mb[func].drv_status);
2128 if (val & DRV_STATUS_DCC_EVENT_MASK)
2130 (val & DRV_STATUS_DCC_EVENT_MASK));
2131 bnx2x__link_status_update(bp);
2132 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2133 bnx2x_pmf_update(bp);
2135 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2137 BNX2X_ERR("MC assert!\n");
2138 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2140 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2141 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2144 } else if (attn & BNX2X_MCP_ASSERT) {
2146 BNX2X_ERR("MCP assert!\n");
2147 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2151 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2154 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2155 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
/* GRC timeout/reserved details are only readable on E1H */
2156 if (attn & BNX2X_GRC_TIMEOUT) {
2157 val = CHIP_IS_E1H(bp) ?
2158 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2159 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2161 if (attn & BNX2X_GRC_RSV) {
2162 val = CHIP_IS_E1H(bp) ?
2163 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2164 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2166 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2170 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2171 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2172 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2173 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2174 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2175 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2177 * should be run under rtnl lock
/* Clear the reset-in-progress flag bit in the shared MISC generic
 * register (load counter bits are preserved). Run under rtnl lock.
 */
2179 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2181 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2182 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2183 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2189 * should be run under rtnl lock
/* Mark recovery/reset as in progress in the shared MISC generic register
 * (the flag-setting line is not visible in this view). Run under rtnl lock.
 */
2191 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2193 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2195 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2201 * should be run under rtnl lock
/* Return true when no reset is marked in progress (the flag bits above
 * the load counter are all clear). Run under rtnl lock.
 */
2203 bool bnx2x_reset_is_done(struct bnx2x *bp)
2205 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2206 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2207 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2211 * should be run under rtnl lock
/* Increment the shared per-chip load counter (lower LOAD_COUNTER_BITS of
 * the MISC generic register), preserving the reset flag bits; the counter
 * wraps within its mask. Run under rtnl lock.
 */
2213 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2215 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2217 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2219 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2220 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2226 * should be run under rtnl lock
/* Decrement the shared load counter (reset flag bits preserved) and
 * return a value derived from it (return line not visible in this view).
 * Run under rtnl lock.
 */
2228 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2230 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2232 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2234 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2235 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2243 * should be run under rtnl lock
/* Read the current shared load counter. Run under rtnl lock. */
2245 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2247 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
/* Zero the shared load counter, keeping the reset flag bits intact. */
2250 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2252 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
/* Helper for the parity reports below: prints one block name in the
 * running "blocks with parity errors" list (body not visible in this
 * view; idx is the running count, blk the block's name).
 */
2256 static inline void _print_next_block(int idx, const char *blk)
/* Walk the set bits of AEU parity signature word 0 and print the name of
 * each HW block reporting a parity error; returns the updated running
 * count (par_num) for the next signature word.
 */
2263 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2267 for (i = 0; sig; i++) {
2268 cur_bit = ((u32)0x1 << i);
2269 if (sig & cur_bit) {
2271 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2272 _print_next_block(par_num++, "BRB");
2274 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2275 _print_next_block(par_num++, "PARSER");
2277 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2278 _print_next_block(par_num++, "TSDM");
2280 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2281 _print_next_block(par_num++, "SEARCHER");
2283 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2284 _print_next_block(par_num++, "TSEMI");
/* Same as parity0, for AEU parity signature word 1. */
2296 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2300 for (i = 0; sig; i++) {
2301 cur_bit = ((u32)0x1 << i);
2302 if (sig & cur_bit) {
2304 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2305 _print_next_block(par_num++, "PBCLIENT");
2307 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2308 _print_next_block(par_num++, "QM");
2310 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2311 _print_next_block(par_num++, "XSDM");
2313 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2314 _print_next_block(par_num++, "XSEMI");
2316 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2317 _print_next_block(par_num++, "DOORBELLQ");
2319 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2320 _print_next_block(par_num++, "VAUX PCI CORE");
2322 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2323 _print_next_block(par_num++, "DEBUG");
2325 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2326 _print_next_block(par_num++, "USDM");
2328 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2329 _print_next_block(par_num++, "USEMI");
2331 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2332 _print_next_block(par_num++, "UPB");
2334 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2335 _print_next_block(par_num++, "CSDM");
/* Same as parity0, for AEU parity signature word 2. */
2347 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2351 for (i = 0; sig; i++) {
2352 cur_bit = ((u32)0x1 << i);
2353 if (sig & cur_bit) {
2355 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2356 _print_next_block(par_num++, "CSEMI");
2358 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2359 _print_next_block(par_num++, "PXP");
2361 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2362 _print_next_block(par_num++,
2363 "PXPPCICLOCKCLIENT");
2365 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2366 _print_next_block(par_num++, "CFC");
2368 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2369 _print_next_block(par_num++, "CDU");
2371 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2372 _print_next_block(par_num++, "IGU");
2374 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2375 _print_next_block(par_num++, "MISC");
/* Same as parity0, for AEU parity signature word 3 (MCP latched bits). */
2387 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2391 for (i = 0; sig; i++) {
2392 cur_bit = ((u32)0x1 << i);
2393 if (sig & cur_bit) {
2395 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2396 _print_next_block(par_num++, "MCP ROM");
2398 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2399 _print_next_block(par_num++, "MCP UMP RX");
2401 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2402 _print_next_block(par_num++, "MCP UMP TX");
2404 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2405 _print_next_block(par_num++, "MCP SCPAD");
/* Check the four AEU signature words against the known parity-assert
 * masks; if any parity bit is set, dump the list of affected blocks
 * (via the parity0..3 helpers) and report a parity condition to the
 * caller (return lines not visible in this view).
 */
2417 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2420 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2421 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2423 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2424 "[0]:0x%08x [1]:0x%08x "
2425 "[2]:0x%08x [3]:0x%08x\n",
2426 sig0 & HW_PRTY_ASSERT_SET_0,
2427 sig1 & HW_PRTY_ASSERT_SET_1,
2428 sig2 & HW_PRTY_ASSERT_SET_2,
2429 sig3 & HW_PRTY_ASSERT_SET_3);
2430 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2432 par_num = bnx2x_print_blocks_with_parity0(
2433 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2434 par_num = bnx2x_print_blocks_with_parity1(
2435 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2436 par_num = bnx2x_print_blocks_with_parity2(
2437 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2438 par_num = bnx2x_print_blocks_with_parity3(
2439 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
/* Read the four after-invert AEU attention words for this port and feed
 * them to bnx2x_parity_attn() to decide whether a parity error occurred.
 */
2446 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2448 struct attn_route attn;
2449 int port = BP_PORT(bp);
2451 attn.sig[0] = REG_RD(bp,
2452 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2454 attn.sig[1] = REG_RD(bp,
2455 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2457 attn.sig[2] = REG_RD(bp,
2458 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2460 attn.sig[3] = REG_RD(bp,
2461 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2464 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
/* Handle deasserted attention bits. Under the split MCP access lock:
 * first check for parity errors (if found, start the recovery task,
 * disable interrupts and bail so other functions also see the parity);
 * otherwise dispatch each deasserted dynamic attention group to the
 * per-group handlers, ack the bits toward the HC, restore the AEU mask
 * (under the per-port HW lock) and clear the bits from bp->attn_state.
 */
2468 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2470 struct attn_route attn, *group_mask;
2471 int port = BP_PORT(bp);
2477 /* need to take HW lock because MCP or other port might also
2478 try to handle this event */
2479 bnx2x_acquire_alr(bp);
2481 if (bnx2x_chk_parity_attn(bp)) {
2482 bp->recovery_state = BNX2X_RECOVERY_INIT;
2483 bnx2x_set_reset_in_progress(bp);
2484 schedule_delayed_work(&bp->reset_task, 0);
2485 /* Disable HW interrupts */
2486 bnx2x_int_disable(bp);
2487 bnx2x_release_alr(bp);
2488 /* In case of parity errors don't handle attentions so that
2489 * other function would "see" parity errors.
2494 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2495 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2496 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2497 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2498 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2499 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2501 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2502 if (deasserted & (1 << index)) {
2503 group_mask = &bp->attn_group[index];
2505 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2506 index, group_mask->sig[0], group_mask->sig[1],
2507 group_mask->sig[2], group_mask->sig[3]);
/* NOTE(review): group 3 is handled before 1/2/0 here - presumably
 * deliberate ordering (general/latched attns first); confirm before
 * reordering */
2509 bnx2x_attn_int_deasserted3(bp,
2510 attn.sig[3] & group_mask->sig[3]);
2511 bnx2x_attn_int_deasserted1(bp,
2512 attn.sig[1] & group_mask->sig[1]);
2513 bnx2x_attn_int_deasserted2(bp,
2514 attn.sig[2] & group_mask->sig[2]);
2515 bnx2x_attn_int_deasserted0(bp,
2516 attn.sig[0] & group_mask->sig[0]);
2520 bnx2x_release_alr(bp);
2522 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2525 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527 REG_WR(bp, reg_addr, val);
/* A deasserted bit that was never recorded as asserted is an IGU bug */
2529 if (~bp->attn_state & deasserted)
2530 BNX2X_ERR("IGU ERROR\n");
2532 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2533 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2535 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2536 aeu_mask = REG_RD(bp, reg_addr);
2538 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2539 aeu_mask, deasserted);
2540 aeu_mask |= (deasserted & 0x3ff);
2541 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2543 REG_WR(bp, reg_addr, aeu_mask);
2544 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2546 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2547 bp->attn_state &= ~deasserted;
2548 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Top-level attention interrupt dispatcher.
 * Snapshots the attention bits/acks from the default status block,
 * edge-detects against the cached bp->attn_state, and routes the
 * newly raised and newly cleared bits to the assert/deassert handlers.
 * NOTE(review): some original lines are elided in this excerpt (e.g. the
 * le32_to_cpu() field accesses are truncated); comments cover only what
 * is visible.
 */
2551 static void bnx2x_attn_int(struct bnx2x *bp)
2553 /* read local copy of bits */
2554 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2556 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2558 u32 attn_state = bp->attn_state;
2560 /* look for changed bits */
/* asserted: set in HW, not yet acked, not yet in our cached state */
2561 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
/* deasserted: cleared in HW but still acked and cached as active */
2562 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2565 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2566 attn_bits, attn_ack, asserted, deasserted);
/* Sanity: if a bit matches its ack (~XOR) yet disagrees with the cached
 * state, the driver and HW views have diverged.
 */
2568 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2569 BNX2X_ERR("BAD attention state\n");
2571 /* handle bits that were raised */
2573 bnx2x_attn_int_asserted(bp, asserted);
2576 bnx2x_attn_int_deasserted(bp, deasserted);
/* Slow-path deferred work handler (runs from bnx2x_wq via sp_task).
 * Bails out while interrupts are gated by intr_sem, refreshes the
 * default status block indices, and finally acks all five storm index
 * values of the default SB back to the IGU.
 * NOTE(review): the lines that decode individual status bits (attention
 * handling, IGU_INT_* flags of the acks) are elided in this excerpt.
 */
2579 static void bnx2x_sp_task(struct work_struct *work)
2581 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2584 /* Return here if interrupt is disabled */
2585 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2586 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* status: bitmask of default-SB indices that changed since last pass */
2590 status = bnx2x_update_dsb_idx(bp);
2591 /* if (status == 0) */
2592 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2594 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2602 /* CStorm events: STAT_QUERY */
2604 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2608 if (unlikely(status))
2609 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
/* Ack the latest default-SB indices for every storm back to the IGU */
2612 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2614 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2616 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2618 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2620 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/* MSI-X slow-path interrupt handler.
 * Disables further IGU interrupts for the default SB, lets the CNIC
 * handler peek (under RCU), then defers the real work to bnx2x_sp_task.
 * Returns an irqreturn_t (return statements are elided in this excerpt).
 */
2624 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2626 struct net_device *dev = dev_instance;
2627 struct bnx2x *bp = netdev_priv(dev);
2629 /* Return here if interrupt is disabled */
2630 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2631 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* Mask the default SB in the IGU until the work item has run */
2635 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2637 #ifdef BNX2X_STOP_ON_ERROR
2638 if (unlikely(bp->panic))
/* Give CNIC (iSCSI/FCoE offload) a chance to consume the event;
 * presumably this runs under rcu_read_lock() — the lock/unlock lines
 * are elided here, TODO confirm against full source.
 */
2644 struct cnic_ops *c_ops;
2647 c_ops = rcu_dereference(bp->cnic_ops);
2649 c_ops->cnic_handler(bp->cnic_data, NULL);
2653 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2658 /* end of slow path */
/* Periodic driver timer.
 * Skips all work while the interface is down or interrupts are gated.
 * Maintains the driver<->MCP heartbeat (pulse sequence written to shmem,
 * MCP's pulse read back and cross-checked), kicks statistics while the
 * device is OPEN, and re-arms itself at bp->current_interval.
 * NOTE(review): the condition guarding the fp[0] rx poll is elided in
 * this excerpt (likely a debug-only path — confirm against full source).
 */
2660 static void bnx2x_timer(unsigned long data)
2662 struct bnx2x *bp = (struct bnx2x *) data;
2664 if (!netif_running(bp->dev))
2667 if (atomic_read(&bp->intr_sem) != 0)
2671 struct bnx2x_fastpath *fp = &bp->fp[0];
2675 rc = bnx2x_rx_int(fp, 1000);
2678 if (!BP_NOMCP(bp)) {
2679 int func = BP_FUNC(bp);
/* Advance and publish our heartbeat pulse (wraps at DRV_PULSE_SEQ_MASK) */
2683 ++bp->fw_drv_pulse_wr_seq;
2684 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2685 /* TBD - add SYSTEM_TIME */
2686 drv_pulse = bp->fw_drv_pulse_wr_seq;
2687 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2689 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2690 MCP_PULSE_SEQ_MASK);
2691 /* The delta between driver pulse and mcp response
2692 * should be 1 (before mcp response) or 0 (after mcp response)
2694 if ((drv_pulse != mcp_pulse) &&
2695 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2696 /* someone lost a heartbeat... */
2697 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2698 drv_pulse, mcp_pulse);
2702 if (bp->state == BNX2X_STATE_OPEN)
2703 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
/* Re-arm for the next tick */
2706 mod_timer(&bp->timer, jiffies + bp->current_interval);
2709 /* end of Statistics */
2714 * nic init service functions
/* Zero the USTORM and CSTORM host status block areas for one SB id
 * in CSEM fast memory (both live in the CSTORM region on this chip).
 */
2717 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2719 int port = BP_PORT(bp);
2722 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2723 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2724 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2725 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2726 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2727 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
/* Initialize one per-queue host status block.
 * Programs the DMA addresses of the USTORM and CSTORM sections of @sb
 * into CSTORM internal memory, tags each section with the owning
 * function, writes 1 to every HC_DISABLE index (host coalescing off by
 * default; bnx2x_update_coalesce() re-enables selected indices later),
 * and finally enables the SB's interrupt in the IGU.
 */
2730 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2731 dma_addr_t mapping, int sb_id)
2733 int port = BP_PORT(bp);
2734 int func = BP_FUNC(bp);
/* USTORM section: DMA address (lo/hi), owning function, HC disable */
2739 section = ((u64)mapping) + offsetof(struct host_status_block,
2741 sb->u_status_block.status_block_id = sb_id;
2743 REG_WR(bp, BAR_CSTRORM_INTMEM +
2744 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2745 REG_WR(bp, BAR_CSTRORM_INTMEM +
2746 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2748 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2749 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2751 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2752 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2753 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
/* CSTORM section: same layout as above */
2756 section = ((u64)mapping) + offsetof(struct host_status_block,
2758 sb->c_status_block.status_block_id = sb_id;
2760 REG_WR(bp, BAR_CSTRORM_INTMEM +
2761 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2762 REG_WR(bp, BAR_CSTRORM_INTMEM +
2763 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2765 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2766 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2768 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2769 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2770 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
/* Let the IGU deliver interrupts for this status block */
2772 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/* Zero all four storm areas (T, C-u, C-c, X) of this function's
 * default status block in the respective SEM fast memories.
 */
2775 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2777 int func = BP_FUNC(bp);
2779 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2780 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2781 sizeof(struct tstorm_def_status_block)/4);
2782 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2783 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2784 sizeof(struct cstorm_def_status_block_u)/4);
2785 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2786 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2787 sizeof(struct cstorm_def_status_block_c)/4);
2788 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2789 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2790 sizeof(struct xstorm_def_status_block)/4);
/* Initialize the default status block (slow-path SB).
 * Sets up the attention section (caches the per-group AEU enable
 * registers in bp->attn_group, programs the HC attention message
 * address and attention number), then wires the U/C/T/X storm default
 * sections exactly like bnx2x_init_sb() does for fast-path SBs:
 * DMA address lo/hi, owning function, HC_DISABLE=1 for every index.
 * Finishes by clearing pending-stats/pending-set-mac flags and enabling
 * the SB in the IGU.
 */
2793 static void bnx2x_init_def_sb(struct bnx2x *bp,
2794 struct host_def_status_block *def_sb,
2795 dma_addr_t mapping, int sb_id)
2797 int port = BP_PORT(bp);
2798 int func = BP_FUNC(bp);
2799 int index, val, reg_offset;
/* Attention section */
2803 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2804 atten_status_block);
2805 def_sb->atten_status_block.status_block_id = sb_id;
2809 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2810 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
/* Cache the 4 AEU enable words of every dynamic attention group;
 * groups are laid out 0x10 apart, words 0x4 apart.
 */
2812 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813 bp->attn_group[index].sig[0] = REG_RD(bp,
2814 reg_offset + 0x10*index);
2815 bp->attn_group[index].sig[1] = REG_RD(bp,
2816 reg_offset + 0x4 + 0x10*index);
2817 bp->attn_group[index].sig[2] = REG_RD(bp,
2818 reg_offset + 0x8 + 0x10*index);
2819 bp->attn_group[index].sig[3] = REG_RD(bp,
2820 reg_offset + 0xc + 0x10*index);
/* Tell the HC where the attention section lives */
2823 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2824 HC_REG_ATTN_MSG0_ADDR_L);
2826 REG_WR(bp, reg_offset, U64_LO(section));
2827 REG_WR(bp, reg_offset + 4, U64_HI(section));
2829 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2831 val = REG_RD(bp, reg_offset);
2833 REG_WR(bp, reg_offset, val);
/* USTORM default section */
2836 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2837 u_def_status_block);
2838 def_sb->u_def_status_block.status_block_id = sb_id;
2840 REG_WR(bp, BAR_CSTRORM_INTMEM +
2841 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2842 REG_WR(bp, BAR_CSTRORM_INTMEM +
2843 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2845 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2846 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2848 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2849 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2850 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
/* CSTORM default section */
2853 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2854 c_def_status_block);
2855 def_sb->c_def_status_block.status_block_id = sb_id;
2857 REG_WR(bp, BAR_CSTRORM_INTMEM +
2858 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2859 REG_WR(bp, BAR_CSTRORM_INTMEM +
2860 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2862 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2863 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2865 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2866 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2867 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
/* TSTORM default section */
2870 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2871 t_def_status_block);
2872 def_sb->t_def_status_block.status_block_id = sb_id;
2874 REG_WR(bp, BAR_TSTRORM_INTMEM +
2875 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2876 REG_WR(bp, BAR_TSTRORM_INTMEM +
2877 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2879 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2880 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2882 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2883 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2884 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* XSTORM default section */
2887 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2888 x_def_status_block);
2889 def_sb->x_def_status_block.status_block_id = sb_id;
2891 REG_WR(bp, BAR_XSTRORM_INTMEM +
2892 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2893 REG_WR(bp, BAR_XSTRORM_INTMEM +
2894 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2896 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2897 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2899 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2900 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2901 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2903 bp->stats_pending = 0;
2904 bp->set_mac_pending = 0;
2906 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/* Program interrupt coalescing for every queue.
 * Writes the RX and TX timeout values (ticks scaled by 4*BNX2X_BTR)
 * into the CSTORM HC timeout slots, and toggles the matching HC_DISABLE
 * flag: coalescing is disabled (1) whenever the scaled timeout is zero.
 */
2909 void bnx2x_update_coalesce(struct bnx2x *bp)
2911 int port = BP_PORT(bp);
2914 for_each_queue(bp, i) {
2915 int sb_id = bp->fp[i].sb_id;
2917 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2918 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2919 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2920 U_SB_ETH_RX_CQ_INDEX),
2921 bp->rx_ticks/(4 * BNX2X_BTR));
2922 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2923 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2924 U_SB_ETH_RX_CQ_INDEX),
2925 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2927 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2928 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2929 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2930 C_SB_ETH_TX_CQ_INDEX),
2931 bp->tx_ticks/(4 * BNX2X_BTR));
2932 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2933 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2934 C_SB_ETH_TX_CQ_INDEX),
2935 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
/* Initialize the slow-path (SPQ) ring: lock, software producer state,
 * and the SPQ page base/producer registers in XSTORM fast memory.
 */
2939 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2941 int func = BP_FUNC(bp);
2943 spin_lock_init(&bp->spq_lock);
2945 bp->spq_left = MAX_SPQ_PENDING;
2946 bp->spq_prod_idx = 0;
2947 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2948 bp->spq_prod_bd = bp->spq;
2949 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
/* Publish the SPQ DMA page base (lo/hi) and producer to the XSTORM */
2951 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2952 U64_LO(bp->spq_mapping));
2954 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2955 U64_HI(bp->spq_mapping));
2957 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
/* Build the per-queue Ethernet connection contexts.
 * First pass fills the USTORM (RX) side: SB index layout, client id,
 * flags, buffer sizes, BD/SGE page bases, and TPA parameters when TPA
 * is enabled; it also programs the CDU reservation words. Second pass
 * fills the CSTORM/XSTORM (TX) side: completion SB index, TX BD page
 * base and statistics id.
 */
2961 static void bnx2x_init_context(struct bnx2x *bp)
2966 for_each_queue(bp, i) {
2967 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2968 struct bnx2x_fastpath *fp = &bp->fp[i];
2969 u8 cl_id = fp->cl_id;
2971 context->ustorm_st_context.common.sb_index_numbers =
2972 BNX2X_RX_SB_INDEX_NUM;
2973 context->ustorm_st_context.common.clientId = cl_id;
2974 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2975 context->ustorm_st_context.common.flags =
2976 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2977 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2978 context->ustorm_st_context.common.statistics_counter_id =
2980 context->ustorm_st_context.common.mc_alignment_log_size =
2981 BNX2X_RX_ALIGN_SHIFT;
2982 context->ustorm_st_context.common.bd_buff_size =
2984 context->ustorm_st_context.common.bd_page_base_hi =
2985 U64_HI(fp->rx_desc_mapping);
2986 context->ustorm_st_context.common.bd_page_base_lo =
2987 U64_LO(fp->rx_desc_mapping);
/* TPA (LRO-style aggregation) setup — only when not disabled */
2988 if (!fp->disable_tpa) {
2989 context->ustorm_st_context.common.flags |=
2990 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2991 context->ustorm_st_context.common.sge_buff_size =
2992 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2994 context->ustorm_st_context.common.sge_page_base_hi =
2995 U64_HI(fp->rx_sge_mapping);
2996 context->ustorm_st_context.common.sge_page_base_lo =
2997 U64_LO(fp->rx_sge_mapping);
/* max_sges_for_packet: MTU in SGE pages, rounded up to a
 * PAGES_PER_SGE multiple, expressed in SGE units.
 */
2999 context->ustorm_st_context.common.max_sges_for_packet =
3000 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3001 context->ustorm_st_context.common.max_sges_for_packet =
3002 ((context->ustorm_st_context.common.
3003 max_sges_for_packet + PAGES_PER_SGE - 1) &
3004 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
/* CDU reservation words validate the context against its CID */
3007 context->ustorm_ag_context.cdu_usage =
3008 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3009 CDU_REGION_NUMBER_UCM_AG,
3010 ETH_CONNECTION_TYPE);
3012 context->xstorm_ag_context.cdu_reserved =
3013 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3014 CDU_REGION_NUMBER_XCM_AG,
3015 ETH_CONNECTION_TYPE);
/* TX side of each context */
3019 for_each_queue(bp, i) {
3020 struct bnx2x_fastpath *fp = &bp->fp[i];
3021 struct eth_context *context =
3022 bnx2x_sp(bp, context[i].eth);
3024 context->cstorm_st_context.sb_index_number =
3025 C_SB_ETH_TX_CQ_INDEX;
3026 context->cstorm_st_context.status_block_id = fp->sb_id;
3028 context->xstorm_st_context.tx_bd_page_base_hi =
3029 U64_HI(fp->tx_desc_mapping);
3030 context->xstorm_st_context.tx_bd_page_base_lo =
3031 U64_LO(fp->tx_desc_mapping);
3032 context->xstorm_st_context.statistics_data = (fp->cl_id |
3033 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
/* Fill the RSS indirection table in TSTORM memory, spreading entries
 * round-robin across the client ids of all RX queues. A no-op when
 * RSS is disabled.
 */
3037 static void bnx2x_init_ind_table(struct bnx2x *bp)
3039 int func = BP_FUNC(bp);
3042 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3046 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
3047 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3048 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3049 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3050 bp->fp->cl_id + (i % bp->num_queues));
/* Push the TSTORM per-client configuration (MTU, statistics and E1H
 * outer-VLAN removal flags, plus inner-VLAN stripping when a vlan
 * group is registered and HW stripping is enabled) to every queue's
 * client slot. The struct is written as two raw 32-bit words.
 */
3053 void bnx2x_set_client_config(struct bnx2x *bp)
3055 struct tstorm_eth_client_config tstorm_client = {0};
3056 int port = BP_PORT(bp);
3059 tstorm_client.mtu = bp->dev->mtu;
3060 tstorm_client.config_flags =
3061 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3062 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3064 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3065 tstorm_client.config_flags |=
3066 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3067 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3071 for_each_queue(bp, i) {
3072 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
/* Write the config struct as two consecutive 32-bit words */
3074 REG_WR(bp, BAR_TSTRORM_INTMEM +
3075 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3076 ((u32 *)&tstorm_client)[0]);
3077 REG_WR(bp, BAR_TSTRORM_INTMEM +
3078 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3079 ((u32 *)&tstorm_client)[1]);
3082 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3083 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
/* Translate bp->rx_mode into the TSTORM MAC filter configuration and
 * the NIG LLH drive mask for this port, then (unless RX is disabled)
 * refresh the per-client configuration. @mask selects which client(s)
 * each drop/accept flag applies to.
 */
3086 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3088 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3089 int mode = bp->rx_mode;
3090 int mask = bp->rx_mode_cl_mask;
3091 int func = BP_FUNC(bp);
3092 int port = BP_PORT(bp);
3094 /* All but management unicast packets should pass to the host as well */
3096 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3097 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3098 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3099 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3101 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3104 case BNX2X_RX_MODE_NONE: /* no Rx */
3105 tstorm_mac_filter.ucast_drop_all = mask;
3106 tstorm_mac_filter.mcast_drop_all = mask;
3107 tstorm_mac_filter.bcast_drop_all = mask;
3110 case BNX2X_RX_MODE_NORMAL:
3111 tstorm_mac_filter.bcast_accept_all = mask;
3114 case BNX2X_RX_MODE_ALLMULTI:
3115 tstorm_mac_filter.mcast_accept_all = mask;
3116 tstorm_mac_filter.bcast_accept_all = mask;
3119 case BNX2X_RX_MODE_PROMISC:
3120 tstorm_mac_filter.ucast_accept_all = mask;
3121 tstorm_mac_filter.mcast_accept_all = mask;
3122 tstorm_mac_filter.bcast_accept_all = mask;
3123 /* pass management unicast packets as well */
3124 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3128 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3133 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
/* Copy the filter struct word-by-word into TSTORM internal memory */
3136 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3137 REG_WR(bp, BAR_TSTRORM_INTMEM +
3138 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3139 ((u32 *)&tstorm_mac_filter)[i]);
3141 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3142 ((u32 *)&tstorm_mac_filter)[i]); */
3145 if (mode != BNX2X_RX_MODE_NONE)
3146 bnx2x_set_client_config(bp);
/* Chip-common internal memory init: clear the USTORM TPA aggregation
 * data area (the init tool does not cover it).
 */
3149 static void bnx2x_init_internal_common(struct bnx2x *bp)
3153 /* Zero this manually as its initialization is
3154 currently missing in the initTool */
3155 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3156 REG_WR(bp, BAR_USTRORM_INTMEM +
3157 USTORM_AGG_DATA_OFFSET + i * 4, 0);
/* Per-port internal memory init: program the HC base timer resolution
 * (BNX2X_BTR) into the C/U CSTORM, TSTORM and XSTORM BTR slots.
 */
3160 static void bnx2x_init_internal_port(struct bnx2x *bp)
3162 int port = BP_PORT(bp);
3165 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3167 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3168 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3169 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
/* Per-function internal memory initialization. In order:
 *  - TSTORM common config (RSS flags/mask, TPA enable, leading client);
 *  - initial RX mode (NONE until link comes up);
 *  - zero the X/T/U per-client statistics areas;
 *  - statistics collection flags and the DMA address of the FW stats
 *    buffer for all four storms;
 *  - E1H-only: function mode and outer-VLAN (E1HOV) setup;
 *  - per-queue CQE page base/next and max TPA aggregation size;
 *  - E1H-only: dropless flow control pause thresholds;
 *  - congestion management (rate shaping / fairness) contexts, then
 *    store the cmng struct into XSTORM memory.
 * NOTE(review): several guard conditions (e.g. the MF-mode branch
 * around the minmax init) are elided in this excerpt.
 */
3172 static void bnx2x_init_internal_func(struct bnx2x *bp)
3174 struct tstorm_eth_function_common_config tstorm_config = {0};
3175 struct stats_indication_flags stats_flags = {0};
3176 int port = BP_PORT(bp);
3177 int func = BP_FUNC(bp);
3182 tstorm_config.config_flags = RSS_FLAGS(bp);
3185 tstorm_config.rss_result_mask = MULTI_MASK;
3187 /* Enable TPA if needed */
3188 if (bp->flags & TPA_ENABLE_FLAG)
3189 tstorm_config.config_flags |=
3190 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3193 tstorm_config.config_flags |=
3194 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3196 tstorm_config.leading_client_id = BP_L_ID(bp);
3198 REG_WR(bp, BAR_TSTRORM_INTMEM +
3199 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3200 (*(u32 *)&tstorm_config));
3202 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3203 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3204 bnx2x_set_storm_rx_mode(bp);
/* Clear per-client statistics in all three stats-carrying storms */
3206 for_each_queue(bp, i) {
3207 u8 cl_id = bp->fp[i].cl_id;
3209 /* reset xstorm per client statistics */
3210 offset = BAR_XSTRORM_INTMEM +
3211 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3213 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3214 REG_WR(bp, offset + j*4, 0);
3216 /* reset tstorm per client statistics */
3217 offset = BAR_TSTRORM_INTMEM +
3218 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3220 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3221 REG_WR(bp, offset + j*4, 0);
3223 /* reset ustorm per client statistics */
3224 offset = BAR_USTRORM_INTMEM +
3225 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3227 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3228 REG_WR(bp, offset + j*4, 0);
3231 /* Init statistics related context */
3232 stats_flags.collect_eth = 1;
3234 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3235 ((u32 *)&stats_flags)[0]);
3236 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3237 ((u32 *)&stats_flags)[1]);
3239 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3240 ((u32 *)&stats_flags)[0]);
3241 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3242 ((u32 *)&stats_flags)[1]);
3244 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3245 ((u32 *)&stats_flags)[0]);
3246 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3247 ((u32 *)&stats_flags)[1]);
3249 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3250 ((u32 *)&stats_flags)[0]);
3251 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3252 ((u32 *)&stats_flags)[1]);
/* Tell each storm where the FW statistics DMA buffer lives */
3254 REG_WR(bp, BAR_XSTRORM_INTMEM +
3255 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3256 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3257 REG_WR(bp, BAR_XSTRORM_INTMEM +
3258 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3259 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3261 REG_WR(bp, BAR_TSTRORM_INTMEM +
3262 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3263 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3264 REG_WR(bp, BAR_TSTRORM_INTMEM +
3265 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3266 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3268 REG_WR(bp, BAR_USTRORM_INTMEM +
3269 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3270 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3271 REG_WR(bp, BAR_USTRORM_INTMEM +
3272 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3273 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3275 if (CHIP_IS_E1H(bp)) {
3276 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3278 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3280 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3282 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3285 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3289 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3290 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3291 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3292 for_each_queue(bp, i) {
3293 struct bnx2x_fastpath *fp = &bp->fp[i];
3295 REG_WR(bp, BAR_USTRORM_INTMEM +
3296 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3297 U64_LO(fp->rx_comp_mapping));
3298 REG_WR(bp, BAR_USTRORM_INTMEM +
3299 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3300 U64_HI(fp->rx_comp_mapping));
3303 REG_WR(bp, BAR_USTRORM_INTMEM +
3304 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3305 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3306 REG_WR(bp, BAR_USTRORM_INTMEM +
3307 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3308 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3310 REG_WR16(bp, BAR_USTRORM_INTMEM +
3311 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3315 /* dropless flow control */
3316 if (CHIP_IS_E1H(bp)) {
3317 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3319 rx_pause.bd_thr_low = 250;
3320 rx_pause.cqe_thr_low = 250;
3322 rx_pause.sge_thr_low = 0;
3323 rx_pause.bd_thr_high = 350;
3324 rx_pause.cqe_thr_high = 350;
3325 rx_pause.sge_thr_high = 0;
3327 for_each_queue(bp, i) {
3328 struct bnx2x_fastpath *fp = &bp->fp[i];
3330 if (!fp->disable_tpa) {
3331 rx_pause.sge_thr_low = 150;
3332 rx_pause.sge_thr_high = 250;
3336 offset = BAR_USTRORM_INTMEM +
3337 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3340 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3342 REG_WR(bp, offset + j*4,
3343 ((u32 *)&rx_pause)[j]);
3347 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3349 /* Init rate shaping and fairness contexts */
3353 /* During init there is no active link
3354 Until link is up, set link rate to 10Gbps */
3355 bp->link_vars.line_speed = SPEED_10000;
3356 bnx2x_init_port_minmax(bp);
3360 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3361 bnx2x_calc_vn_weight_sum(bp);
3363 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3364 bnx2x_init_vn_minmax(bp, 2*vn + port);
3366 /* Enable rate shaping and fairness */
3367 bp->cmng.flags.cmng_enables |=
3368 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3371 /* rate shaping and fairness are disabled */
3373 "single function mode minmax will be disabled\n");
3377 /* Store cmng structures to internal memory */
3379 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3380 REG_WR(bp, BAR_XSTRORM_INTMEM +
3381 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3382 ((u32 *)(&bp->cmng))[i]);
/* Run the internal-memory init stages appropriate for the MCP load
 * code: COMMON implies PORT implies FUNCTION (fall-through by design —
 * the common loader also initializes the port and function levels).
 */
3385 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3387 switch (load_code) {
3388 case FW_MSG_CODE_DRV_LOAD_COMMON:
3389 bnx2x_init_internal_common(bp);
3392 case FW_MSG_CODE_DRV_LOAD_PORT:
3393 bnx2x_init_internal_port(bp);
3396 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3397 bnx2x_init_internal_func(bp);
3401 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/* Top-level NIC initialization after firmware load.
 * Assigns client/SB ids and initializes the status block of every
 * fast-path queue, then the default SB, coalescing, RX/TX/SP rings,
 * contexts, internal memories, RSS table and statistics. Finally drops
 * intr_sem to 0, enables interrupts and re-checks the SPIO5 attention
 * bit (fan failure line on some boards — confirm against full source).
 */
3406 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3410 for_each_queue(bp, i) {
3411 struct bnx2x_fastpath *fp = &bp->fp[i];
3414 fp->state = BNX2X_FP_STATE_CLOSED;
3416 fp->cl_id = BP_L_ID(bp) + i;
3418 fp->sb_id = fp->cl_id + 1;
3420 fp->sb_id = fp->cl_id;
3423 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3424 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3425 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3427 bnx2x_update_fpsb_idx(fp);
3430 /* ensure status block indices were read */
3434 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3436 bnx2x_update_dsb_idx(bp);
3437 bnx2x_update_coalesce(bp);
3438 bnx2x_init_rx_rings(bp);
3439 bnx2x_init_tx_ring(bp);
3440 bnx2x_init_sp_ring(bp);
3441 bnx2x_init_context(bp);
3442 bnx2x_init_internal(bp, load_code);
3443 bnx2x_init_ind_table(bp);
3444 bnx2x_stats_init(bp);
3446 /* At this point, we are ready for interrupts */
3447 atomic_set(&bp->intr_sem, 0);
3449 /* flush all before enabling interrupts */
3453 bnx2x_int_enable(bp);
3455 /* Check for SPIO5 */
3456 bnx2x_attn_int_deasserted0(bp,
3457 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3458 AEU_INPUTS_ATTN_BITS_SPIO5);
3461 /* end of nic init */
3464 * gzip service functions
/* Allocate the firmware decompression resources: a coherent DMA buffer
 * for the inflated image, a zlib stream object, and its inflate
 * workspace. On any failure everything already allocated is released
 * (the goto-cleanup labels are elided in this excerpt) and an error
 * is reported.
 */
3467 static int bnx2x_gunzip_init(struct bnx2x *bp)
3469 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3470 &bp->gunzip_mapping, GFP_KERNEL);
3471 if (bp->gunzip_buf == NULL)
3474 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3475 if (bp->strm == NULL)
3478 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3480 if (bp->strm->workspace == NULL)
/* error unwind: free what was allocated, in reverse order */
3490 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3491 bp->gunzip_mapping);
3492 bp->gunzip_buf = NULL;
3495 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3496 " un-compression\n");
/* Release the decompression resources allocated by bnx2x_gunzip_init():
 * the zlib workspace and, if present, the coherent DMA buffer (the
 * kfree of bp->strm itself is elided in this excerpt).
 */
3500 static void bnx2x_gunzip_end(struct bnx2x *bp)
3502 kfree(bp->strm->workspace);
3507 if (bp->gunzip_buf) {
3508 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3509 bp->gunzip_mapping);
3510 bp->gunzip_buf = NULL;
/* Decompress a gzip-wrapped firmware blob into bp->gunzip_buf.
 * Validates the gzip magic/deflate method, skips an embedded original
 * file name if the FNAME flag is set, then inflates the raw deflate
 * stream (negative windowBits = no zlib header). Records the output
 * length in 32-bit words in bp->gunzip_outlen and warns if it is not
 * 4-byte aligned. Success requires the stream to end with Z_STREAM_END.
 */
3514 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3518 /* check gzip header */
3519 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3520 BNX2X_ERR("Bad gzip header\n");
/* FNAME flag: a NUL-terminated original file name follows the header */
3528 if (zbuf[3] & FNAME)
3529 while ((zbuf[n++] != 0) && (n < len));
3531 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3532 bp->strm->avail_in = len - n;
3533 bp->strm->next_out = bp->gunzip_buf;
3534 bp->strm->avail_out = FW_BUF_SIZE;
/* -MAX_WBITS: inflate a raw deflate stream (gzip wrapper handled above) */
3536 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3540 rc = zlib_inflate(bp->strm, Z_FINISH);
3541 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3542 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3545 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3546 if (bp->gunzip_outlen & 0x3)
3547 netdev_err(bp->dev, "Firmware decompression error:"
3548 " gunzip_outlen (%d) not aligned\n",
3550 bp->gunzip_outlen >>= 2;
3552 zlib_inflateEnd(bp->strm);
3554 if (rc == Z_STREAM_END)
3560 /* nic load/unload */
3563 * General service functions
3566 /* send a NIG loopback debug packet */
/* Inject one minimal debug packet into the NIG loopback path via two
 * DMAE writes: an SOP word-group carrying the (dummy 55:55:...) MAC
 * addresses, then an EOP word-group with a non-IP ethertype.
 */
3567 static void bnx2x_lb_pckt(struct bnx2x *bp)
3571 /* Ethernet source and destination addresses */
3572 wb_write[0] = 0x55555555;
3573 wb_write[1] = 0x55555555;
3574 wb_write[2] = 0x20; /* SOP */
3575 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3577 /* NON-IP protocol */
3578 wb_write[0] = 0x09000000;
3579 wb_write[1] = 0x55555555;
3580 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3581 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3584 /* some of the internal memories
3585 * are not directly readable from the driver
3586 * to test them we send debug packets
/* Exercise internal memories that are not directly readable by sending
 * debug packets through the parser path and polling NIG/PRS counters.
 * Part 1: single packet with CFC-search credits at 0; part 2: ten
 * packets, then restore one credit and verify drainage; between and
 * after the parts BRB/PRS are reset and re-initialized, the NIG EOP
 * FIFO is drained, and the parser neighbor inputs are re-enabled.
 * @factor stretches every timeout on FPGA/emulation platforms.
 * Returns 0 on success, negative on timeout (return statements elided
 * in this excerpt).
 */
3588 static int bnx2x_int_mem_test(struct bnx2x *bp)
3594 if (CHIP_REV_IS_FPGA(bp))
3596 else if (CHIP_REV_IS_EMUL(bp))
3601 DP(NETIF_MSG_HW, "start part1\n");
3603 /* Disable inputs of parser neighbor blocks */
3604 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3605 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3606 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3607 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3609 /* Write 0 to parser credits for CFC search request */
3610 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3612 /* send Ethernet packet */
3615 /* TODO do i reset NIG statistic? */
3616 /* Wait until NIG register shows 1 packet of size 0x10 */
3617 count = 1000 * factor;
3620 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3621 val = *bnx2x_sp(bp, wb_data[0]);
3629 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3633 /* Wait until PRS register shows 1 packet */
3634 count = 1000 * factor;
3636 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3644 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3648 /* Reset and init BRB, PRS */
3649 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3651 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3653 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3654 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3656 DP(NETIF_MSG_HW, "part2\n");
3658 /* Disable inputs of parser neighbor blocks */
3659 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3660 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3661 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3662 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3664 /* Write 0 to parser credits for CFC search request */
3665 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3667 /* send 10 Ethernet packets */
3668 for (i = 0; i < 10; i++)
3671 /* Wait until NIG register shows 10 + 1
3672 packets of size 11*0x10 = 0xb0 */
3673 count = 1000 * factor;
3676 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3677 val = *bnx2x_sp(bp, wb_data[0]);
3685 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3689 /* Wait until PRS register shows 2 packets */
3690 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3692 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3694 /* Write 1 to parser credits for CFC search request */
3695 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3697 /* Wait until PRS register shows 3 packets */
3698 msleep(10 * factor);
3699 /* Wait until NIG register shows 1 packet of size 0x10 */
3700 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3702 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3704 /* clear NIG EOP FIFO */
3705 for (i = 0; i < 11; i++)
3706 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3707 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3709 BNX2X_ERR("clear of NIG failed\n");
3713 /* Reset and init BRB, PRS, NIG */
3714 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3718 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3719 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3722 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3725 /* Enable inputs of parser neighbor blocks */
3726 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3727 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3728 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3729 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3731 DP(NETIF_MSG_HW, "done\n");
/* Unmask the interrupt (attention) sources of every HW block by
 * clearing its INT_MASK register. The commented-out SEM/MISC masks are
 * intentionally left masked; PXP2 keeps a chip-rev-specific partial
 * mask and PBF keeps bits 3-4 masked.
 */
3736 static void enable_blocks_attention(struct bnx2x *bp)
3738 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3739 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3740 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3741 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3742 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3743 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3744 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3745 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3746 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3747 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3748 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3749 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3750 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3751 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3752 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3753 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3754 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3755 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3756 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3757 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3758 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3759 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
/* PXP2 mask differs between FPGA and silicon revisions */
3760 if (CHIP_REV_IS_FPGA(bp))
3761 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3763 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3764 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3765 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3766 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3767 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3768 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3769 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3770 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3771 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3772 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/*
 * bnx2x_parity_mask - per-block parity-mask register table.
 *
 * Each entry pairs a block's PRTY_MASK register address with the mask
 * value to program: 0xffffffff masks all parity bits for that block,
 * 0x0 unmasks everything, and the commented values name individual
 * masked bits.  Consumed by enable_blocks_parity().
 *
 * NOTE(review): the struct's member declarations (between lines 3775 and
 * 3778) and the closing '};' are missing from this listing.
 */
3775 static const struct {
3778 } bnx2x_parity_mask[] = {
3779 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3780 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3781 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3782 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3783 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3784 {QM_REG_QM_PRTY_MASK, 0x0},
3785 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3786 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3787 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3788 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3789 {CDU_REG_CDU_PRTY_MASK, 0x0},
3790 {CFC_REG_CFC_PRTY_MASK, 0x0},
3791 {DBG_REG_DBG_PRTY_MASK, 0x0},
3792 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3793 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3794 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3795 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3796 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3797 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3798 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3799 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3800 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3801 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3802 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3803 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3804 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3805 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3806 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
/*
 * enable_blocks_parity - program every entry of bnx2x_parity_mask[]
 * into its corresponding PRTY_MASK register, enabling parity-error
 * reporting with the per-block masks defined in the table above.
 */
3809 static void enable_blocks_parity(struct bnx2x *bp)
3811 int i, mask_arr_len =
3812 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3814 for (i = 0; i < mask_arr_len; i++)
3815 REG_WR(bp, bnx2x_parity_mask[i].addr,
3816 bnx2x_parity_mask[i].mask);
/*
 * bnx2x_reset_common - assert reset on the common HW blocks by writing
 * the MISC RESET_REG_1/RESET_REG_2 CLEAR registers.
 * NOTE(review): the value written to RESET_REG_1_CLEAR (line 3824) is
 * missing from this listing.
 */
3820 static void bnx2x_reset_common(struct bnx2x *bp)
3823 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3825 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
/*
 * bnx2x_init_pxp - derive PCIe write/read ordering from the device's
 * PCI Express Device Control register and program the PXP arbiter.
 *
 * w_order comes from the Max_Payload_Size field (DEVCTL bits 7:5),
 * r_order from the Max_Read_Request_Size field (bits 14:12); bp->mrrs
 * can force the read order (see the debug print at 3840).
 * NOTE(review): the declaration of 'devctl' and the bp->mrrs handling
 * lines are missing from this listing.
 */
3828 static void bnx2x_init_pxp(struct bnx2x *bp)
3831 int r_order, w_order;
3833 pci_read_config_word(bp->pdev,
3834 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3835 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3836 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3838 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3840 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3844 bnx2x_init_pxp_arb(bp, r_order, w_order);
/*
 * bnx2x_setup_fan_failure_detection - enable fan-failure reporting
 * through SPIO 5 when the board (per shared HW config, or per the PHY
 * types of both ports) requires it.
 *
 * Reads SHARED_HW_CFG_FAN_FAILURE from shmem; when the policy is
 * PHY_TYPE, scans both ports' external PHY type (the listing shows the
 * SFX7101/BCM8727/BCM8481 comparison at 3876-3880).  If fan detection
 * is required, SPIO 5 is configured as input, set to active-low
 * interrupt polarity, and enabled as an IGU event source.
 */
3847 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3857 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3858 SHARED_HW_CFG_FAN_FAILURE_MASK;
3860 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3864 * The fan failure mechanism is usually related to the PHY type since
3865 * the power consumption of the board is affected by the PHY. Currently,
3866 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3868 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3869 for (port = PORT_0; port < PORT_MAX; port++) {
3871 SHMEM_RD(bp, dev_info.port_hw_config[port].
3872 external_phy_config) &
3873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3876 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3878 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3880 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3883 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
/* nothing to do when no port needs fan-failure detection */
3885 if (is_required == 0)
3888 /* Fan failure is indicated by SPIO 5 */
3889 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3890 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3892 /* set to active low mode */
3893 val = REG_RD(bp, MISC_REG_SPIO_INT);
3894 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3895 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3896 REG_WR(bp, MISC_REG_SPIO_INT, val);
3898 /* enable interrupt to signal the IGU */
3899 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3900 val |= (1 << MISC_REGISTERS_SPIO_5);
3901 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
/*
 * bnx2x_init_common - one-time, chip-wide HW init performed by the
 * function that receives FW_MSG_CODE_DRV_LOAD_COMMON.
 *
 * Sequence visible in this listing: reset common blocks, bring blocks
 * out of reset, run bnx2x_init_block() over every HW block at
 * COMMON_STAGE, program PXP2 endianness/swap/page-size registers,
 * initialize the QM base-address and pointer tables, seed the searcher
 * (SRC) RSS keys, configure CDU/CFC, clear PCIe error registers, poll
 * the CFC init-done registers, optionally run the internal memory self
 * test on E1 first power-up, then enable attentions/parity and init the
 * common PHY via the MCP.
 *
 * NOTE(review): many lines are missing from this listing (variable
 * declarations, msleep()s, closing braces, return statements, the
 * RESET_REG values at 4052/4054) -- treat the flow below as partial.
 */
3904 static int bnx2x_init_common(struct bnx2x *bp)
3911 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3913 bnx2x_reset_common(bp);
3914 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3915 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3917 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3918 if (CHIP_IS_E1H(bp))
3919 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
/* pulse LCPLL control: 0x100 then back to 0 */
3921 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3923 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3925 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3926 if (CHIP_IS_E1(bp)) {
3927 /* enable HW interrupt from PXP on USDM overflow
3928 bit 16 on INT_MASK_0 */
3929 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3932 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
/* big-endian master settings for the PXP2 request/read queues */
3936 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3937 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3938 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3939 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3940 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3941 /* make sure this value is 0 */
3942 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3944 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3945 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3946 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3947 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3948 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
/* PXP2 request-queue page sizes per client */
3951 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3953 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3954 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3955 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3958 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3959 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3961 /* let the HW do it's magic ... */
3963 /* finish PXP init */
3964 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3966 BNX2X_ERR("PXP2 CFG failed\n");
3969 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3971 BNX2X_ERR("PXP2 RD_INIT failed\n");
3975 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3976 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3978 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3980 /* clean the DMAE memory */
3982 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3984 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3985 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3986 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3987 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3989 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3990 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3991 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3992 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3994 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
/* program QM base addresses / pointer table for all 64 queues
 * (E1H has an additional extended set) */
3999 for (i = 0; i < 64; i++) {
4000 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
4001 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4003 if (CHIP_IS_E1H(bp)) {
4004 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4005 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4010 /* soft reset pulse */
4011 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4012 REG_WR(bp, QM_REG_SOFT_RESET, 0);
4015 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4018 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4019 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4020 if (!CHIP_REV_IS_SLOW(bp)) {
4021 /* enable hw interrupt from doorbell Q */
4022 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4025 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4026 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4027 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4030 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4032 if (CHIP_IS_E1H(bp))
4033 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4035 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4036 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4037 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4038 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
/* zero the four storms' internal fast memory */
4040 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4041 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4042 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4043 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4045 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4046 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4047 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4048 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4051 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4053 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4056 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4057 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4058 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
/* hold the searcher in soft reset while seeding its RSS key registers */
4060 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4061 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4062 REG_WR(bp, i, random32());
4063 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4065 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4066 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4067 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4068 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4069 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4070 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4071 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4072 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4073 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4074 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4076 REG_WR(bp, SRC_REG_SOFT_RST, 0);
4078 if (sizeof(union cdu_context) != 1024)
4079 /* we currently assume that a context is 1024 bytes */
4080 dev_alert(&bp->pdev->dev, "please adjust the size "
4081 "of cdu_context(%ld)\n",
4082 (long)sizeof(union cdu_context));
4084 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4085 val = (4 << 24) + (0 << 12) + 1024;
4086 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4088 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4089 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4090 /* enable context validation interrupt from CFC */
4091 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4093 /* set the thresholds to prevent CFC/CDU race */
4094 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4096 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4097 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4099 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4100 /* Reset PCIE errors for debug */
4101 REG_WR(bp, 0x2814, 0xffffffff);
4102 REG_WR(bp, 0x3820, 0xffffffff);
4104 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4105 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4106 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4107 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4109 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4110 if (CHIP_IS_E1H(bp)) {
4111 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4112 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4115 if (CHIP_REV_IS_SLOW(bp))
4118 /* finish CFC init */
4119 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4121 BNX2X_ERR("CFC LL_INIT failed\n");
4124 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4126 BNX2X_ERR("CFC AC_INIT failed\n");
4129 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4131 BNX2X_ERR("CFC CAM_INIT failed\n");
4134 REG_WR(bp, CFC_REG_DEBUG0, 0);
4136 /* read NIG statistic
4137 to see if this is our first up since powerup */
4138 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4139 val = *bnx2x_sp(bp, wb_data[0]);
4141 /* do internal memory self test */
4142 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4143 BNX2X_ERR("internal mem self test failed\n");
/* these external PHYs require the HW lock for MDIO access */
4147 switch (bp->link_params.phy[EXT_PHY1].type) {
4148 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4149 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4150 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4151 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4152 bp->port.need_hw_lock = 1;
4159 bnx2x_setup_fan_failure_detection(bp);
4161 /* clear PXP2 attentions */
4162 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4164 enable_blocks_attention(bp);
4165 if (CHIP_PARITY_SUPPORTED(bp))
4166 enable_blocks_parity(bp);
/* common PHY init goes through the MCP; without bootcode we cannot
 * initialize the link */
4168 if (!BP_NOMCP(bp)) {
4169 bnx2x_acquire_phy_lock(bp);
4170 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4171 bnx2x_release_phy_lock(bp);
4173 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/*
 * bnx2x_init_port - per-port HW init, run at PORT0_STAGE/PORT1_STAGE.
 *
 * Runs bnx2x_init_block() over the HW blocks for this port, computes
 * BRB pause thresholds from the MTU, configures PBF for pause-less
 * operation at MTU 9000, programs the per-port AEU attention masks,
 * configures NIG/LLH for E1H (multi-function) mode, and finally sets
 * up PHY-specific attention routing (GPIO3 for BCM8726, SPIO5 for
 * BCM8727/SFX7101) before resetting the link.
 *
 * NOTE(review): lines are missing throughout (declarations, 'else'
 * branches of the MTU threshold calculation, break statements in the
 * switch, the return) -- verify flow against the complete source.
 */
4178 static int bnx2x_init_port(struct bnx2x *bp)
4180 int port = BP_PORT(bp);
4181 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4185 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
4187 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4189 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4190 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4192 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4193 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4194 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4195 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4198 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4200 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4201 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4202 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4205 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4207 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
/* BRB pause thresholds depend on chip rev / port count / MTU;
 * thresholds are in 256-byte units (see 14*1024/256 below) */
4208 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4209 /* no pause for emulation and FPGA */
4214 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4215 else if (bp->dev->mtu > 4096) {
4216 if (bp->flags & ONE_PORT_FLAG)
4220 /* (24*1024 + val*4)/256 */
4221 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4224 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4225 high = low + 56; /* 14*1024/256 */
4227 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4228 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4231 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4233 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4234 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4238 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4239 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4240 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4241 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4243 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4244 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4246 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4248 /* configure PBF to work without PAUSE mtu 9000 */
4249 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4251 /* update threshold */
4252 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4253 /* update init credit */
4254 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
/* pulse the PBF init bit to latch the new settings */
4257 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4259 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4262 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4264 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4265 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4267 if (CHIP_IS_E1(bp)) {
4268 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4269 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4271 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4273 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4274 /* init aeu_mask_attn_func_0/1:
4275 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4276 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4277 * bits 4-7 are used for "per vn group attention" */
4278 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4279 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4281 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4282 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4283 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4284 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4285 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4287 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4289 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4291 if (CHIP_IS_E1H(bp)) {
4292 /* 0x2 disable e1hov, 0x1 enable */
4293 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4294 (IS_E1HMF(bp) ? 0x1 : 0x2));
4297 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4298 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4299 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4303 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4304 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
/* route PHY attentions into the AEU depending on the external PHY */
4306 switch (bp->link_params.phy[EXT_PHY1].type) {
4307 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4309 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4311 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4312 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4314 /* The GPIO should be swapped if the swap register is
4316 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4317 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4319 /* Select function upon port-swap configuration */
4321 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4322 aeu_gpio_mask = (swap_val && swap_override) ?
4323 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4326 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4327 aeu_gpio_mask = (swap_val && swap_override) ?
4328 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4329 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4331 val = REG_RD(bp, offset);
4332 /* add GPIO3 to group */
4333 val |= aeu_gpio_mask;
4334 REG_WR(bp, offset, val);
4336 bp->port.need_hw_lock = 1;
/* NOTE(review): 8727 appears to fall through to SFX7101 (SPIO5 set up
 * for both) -- the break/fallthrough lines are missing here; confirm */
4339 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4340 bp->port.need_hw_lock = 1;
4341 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4342 /* add SPIO 5 to group 0 */
4344 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4345 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4346 val = REG_RD(bp, reg_addr);
4347 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4348 REG_WR(bp, reg_addr, val);
4351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4353 bp->port.need_hw_lock = 1;
4358 bnx2x__link_reset(bp);
/*
 * ILT (Internal Lookup Table) layout helpers.
 *
 * The 768 ILT lines are split evenly between the two functions.
 * ONCHIP_ADDR1/2 split a 64-bit physical address (>>12, with a valid
 * bit at position 52+1) into the two 32-bit halves of the wide
 * ONCHIP_AT register; PXP_ONE_ILT/PXP_ILT_RANGE pack first/last line
 * numbers into a single register value.  CNIC builds reserve 127 extra
 * ILT lines for iSCSI contexts.
 */
4363 #define ILT_PER_FUNC (768/2)
4364 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4365 /* the phys address is shifted right 12 bits and has an added
4366 1=valid bit added to the 53rd bit
4367 then since this is a wide register(TM)
4368 we split it into two 32 bit writes
4370 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4371 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4372 #define PXP_ONE_ILT(x) (((x) << 10) | x)
4373 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4376 #define CNIC_ILT_LINES 127
4377 #define CNIC_CTX_PER_ILT 16
4379 #define CNIC_ILT_LINES 0
/*
 * bnx2x_ilt_wr - write one ILT entry: split the DMA address with
 * ONCHIP_ADDR1/ONCHIP_ADDR2 and write both halves to the chip-rev
 * specific ONCHIP_AT register for the given ILT line index.
 */
4382 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4386 if (CHIP_IS_E1H(bp))
4387 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4389 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4391 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
/*
 * bnx2x_init_func - per-PCI-function HW init (FUNC0_STAGE + func).
 *
 * Enables MSI reconfigure in HC, programs this function's ILT lines
 * (context, timers, QM, searcher T1) and the matching FIRST/LAST ILT
 * registers, points the searcher at the T2 table, runs the E1H
 * function-stage block init and LLH function/VLAN setup, initializes
 * HC, clears PCIe error registers and probes the PHY.
 *
 * NOTE(review): declarations, '#ifdef BCM_CNIC' guards, 'else'
 * branches and the return are missing from this listing; the E1 vs
 * E1H split for each ILT range (e.g. 4414 vs 4416) is therefore shown
 * incompletely.
 */
4394 static int bnx2x_init_func(struct bnx2x *bp)
4396 int port = BP_PORT(bp);
4397 int func = BP_FUNC(bp);
4401 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4403 /* set MSI reconfigure capability */
4404 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4405 val = REG_RD(bp, addr);
4406 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4407 REG_WR(bp, addr, val);
/* this function's slice of the ILT starts here */
4409 i = FUNC_ILT_BASE(func);
4411 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4412 if (CHIP_IS_E1H(bp)) {
4413 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4414 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4416 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4417 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4420 i += 1 + CNIC_ILT_LINES;
4421 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4423 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4425 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4426 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4430 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4432 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4434 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4435 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4439 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4441 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4443 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4444 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4447 /* tell the searcher where the T2 table is */
4448 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4450 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4451 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4453 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4454 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4455 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4457 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4460 if (CHIP_IS_E1H(bp)) {
4461 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4462 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4463 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4464 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4465 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4466 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4467 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4468 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4469 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4471 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4472 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4475 /* HC init per function */
4476 if (CHIP_IS_E1H(bp)) {
4477 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4479 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4480 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4482 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4484 /* Reset PCIE errors for debug */
4485 REG_WR(bp, 0x2114, 0xffffffff);
4486 REG_WR(bp, 0x2120, 0xffffffff);
4487 bnx2x_phy_probe(&bp->link_params);
/*
 * bnx2x_init_hw - top-level HW init dispatcher.
 *
 * Based on the load_code negotiated with the MCP, runs the appropriate
 * subset of {common, port, function} init.  The visible structure
 * suggests COMMON falls through to PORT and FUNCTION (each DRV_LOAD_*
 * case runs its stage plus the lower ones); the fallthrough/break
 * lines are missing from this listing -- confirm against full source.
 * Afterwards it latches the MCP driver-pulse sequence and zeroes the
 * default and per-queue status blocks (must precede gunzip end).
 *
 * @load_code: FW_MSG_CODE_DRV_LOAD_{COMMON,PORT,FUNCTION} from the MCP.
 * Returns 0 on success, negative errno otherwise (error paths are not
 * fully visible here).
 */
4491 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4495 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4496 BP_FUNC(bp), load_code);
4499 mutex_init(&bp->dmae_mutex);
4500 rc = bnx2x_gunzip_init(bp);
4504 switch (load_code) {
4505 case FW_MSG_CODE_DRV_LOAD_COMMON:
4506 rc = bnx2x_init_common(bp);
4511 case FW_MSG_CODE_DRV_LOAD_PORT:
4513 rc = bnx2x_init_port(bp);
4518 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4520 rc = bnx2x_init_func(bp);
4526 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/* record the current MCP pulse sequence for the keep-alive handshake */
4530 if (!BP_NOMCP(bp)) {
4531 int func = BP_FUNC(bp);
4533 bp->fw_drv_pulse_wr_seq =
4534 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4535 DRV_PULSE_SEQ_MASK);
4536 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4539 /* this needs to be done before gunzip end */
4540 bnx2x_zero_def_sb(bp);
4541 for_each_queue(bp, i)
4542 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4544 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4548 bnx2x_gunzip_end(bp);
/*
 * bnx2x_free_mem - release everything bnx2x_alloc_mem() obtained:
 * per-queue status blocks, rx rings (buf/desc/comp/sge), tx rings,
 * default status block, slowpath area, the CNIC/timers/QM/searcher
 * tables and the slow-path (SPQ) ring.
 *
 * BNX2X_PCI_FREE wraps dma_free_coherent(), BNX2X_FREE pairs with the
 * vmalloc'ed rings; the macro bodies and the '#ifdef BCM_CNIC' guards
 * are partially missing from this listing.
 */
4553 void bnx2x_free_mem(struct bnx2x *bp)
4556 #define BNX2X_PCI_FREE(x, y, size) \
4559 dma_free_coherent(&bp->pdev->dev, size, x, y); \
4565 #define BNX2X_FREE(x) \
4577 for_each_queue(bp, i) {
4580 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4581 bnx2x_fp(bp, i, status_blk_mapping),
4582 sizeof(struct host_status_block));
4585 for_each_queue(bp, i) {
4587 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4588 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4590 bnx2x_fp(bp, i, rx_desc_mapping),
4591 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4593 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4594 bnx2x_fp(bp, i, rx_comp_mapping),
4595 sizeof(struct eth_fast_path_rx_cqe) *
4599 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4600 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4601 bnx2x_fp(bp, i, rx_sge_mapping),
4602 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4605 for_each_queue(bp, i) {
4607 /* fastpath tx rings: tx_buf tx_desc */
4608 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4609 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4610 bnx2x_fp(bp, i, tx_desc_mapping),
4611 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4613 /* end of fastpath */
4615 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4616 sizeof(struct host_def_status_block));
4618 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4619 sizeof(struct bnx2x_slowpath));
/* searcher T1/T2, timers, QM and CNIC status block (sizes mirror
 * the allocations in bnx2x_alloc_mem) */
4622 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4623 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4624 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4625 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4626 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4627 sizeof(struct host_status_block));
4629 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4631 #undef BNX2X_PCI_FREE
/*
 * bnx2x_alloc_mem - allocate all driver memory: per-queue status
 * blocks, rx rings (buf/desc/comp/sge), tx rings, default status
 * block, slowpath area, searcher T1/T2 tables, timers and QM areas,
 * CNIC status block and the slow-path (SPQ) ring.
 *
 * BNX2X_PCI_ALLOC wraps dma_alloc_coherent() + memset and jumps to
 * alloc_mem_err on failure; BNX2X_FREE/bnx2x_free_mem() undoes
 * everything on the error path (label and return are not visible in
 * this listing).  Returns 0 on success, -ENOMEM on failure.
 */
4635 int bnx2x_alloc_mem(struct bnx2x *bp)
4638 #define BNX2X_PCI_ALLOC(x, y, size) \
4640 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4642 goto alloc_mem_err; \
4643 memset(x, 0, size); \
4646 #define BNX2X_ALLOC(x, size) \
4648 x = vmalloc(size); \
4650 goto alloc_mem_err; \
4651 memset(x, 0, size); \
4658 for_each_queue(bp, i) {
4659 bnx2x_fp(bp, i, bp) = bp;
4662 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4663 &bnx2x_fp(bp, i, status_blk_mapping),
4664 sizeof(struct host_status_block));
4667 for_each_queue(bp, i) {
4669 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4670 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4671 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4672 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4673 &bnx2x_fp(bp, i, rx_desc_mapping),
4674 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4676 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4677 &bnx2x_fp(bp, i, rx_comp_mapping),
4678 sizeof(struct eth_fast_path_rx_cqe) *
4682 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4683 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4684 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4685 &bnx2x_fp(bp, i, rx_sge_mapping),
4686 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4689 for_each_queue(bp, i) {
4691 /* fastpath tx rings: tx_buf tx_desc */
4692 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4693 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4694 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4695 &bnx2x_fp(bp, i, tx_desc_mapping),
4696 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4698 /* end of fastpath */
4700 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4701 sizeof(struct host_def_status_block));
4703 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4704 sizeof(struct bnx2x_slowpath));
4707 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4709 /* allocate searcher T2 table
4710 we allocate 1/4 of alloc num for T2
4711 (which is not entered into the ILT) */
4712 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4714 /* Initialize T2 (for 1024 connections) */
/* each 64-byte T2 entry's last 8 bytes link to the next entry's DMA
 * address, forming the searcher's free-list */
4715 for (i = 0; i < 16*1024; i += 64)
4716 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4718 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4719 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4721 /* QM queues (128*MAX_CONN) */
4722 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4724 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4725 sizeof(struct host_status_block));
4728 /* Slow path ring */
4729 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4737 #undef BNX2X_PCI_ALLOC
4743 * Init service functions
4747 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4749 * @param bp driver descriptor
4750 * @param set set or clear an entry (1 or 0)
4751 * @param mac pointer to a buffer containing a MAC
4752 * @param cl_bit_vec bit vector of clients to register a MAC for
4753 * @param cam_offset offset in a CAM to use
4754 * @param with_bcast set broadcast MAC as well
/*
 * Builds a mac_configuration_cmd in the slowpath area and posts a
 * SET_MAC ramrod.  Entry [0] is the unicast MAC (byte-swapped 16-bit
 * words); when with_bcast is set, entry [1] is the all-ones broadcast
 * MAC.  When 'set' is 0, CAM_INVALIDATE marks the entry for removal
 * instead (the if/else lines around 4772/4782 are missing here).
 */
4756 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4757 u32 cl_bit_vec, u8 cam_offset,
4760 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4761 int port = BP_PORT(bp);
4764 * unicasts 0-31:port0 32-63:port1
4765 * multicast 64-127:port0 128-191:port1
4767 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4768 config->hdr.offset = cam_offset;
4769 config->hdr.client_id = 0xff;
4770 config->hdr.reserved1 = 0;
/* primary (unicast) CAM entry */
4773 config->config_table[0].cam_entry.msb_mac_addr =
4774 swab16(*(u16 *)&mac[0]);
4775 config->config_table[0].cam_entry.middle_mac_addr =
4776 swab16(*(u16 *)&mac[2]);
4777 config->config_table[0].cam_entry.lsb_mac_addr =
4778 swab16(*(u16 *)&mac[4]);
4779 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4781 config->config_table[0].target_table_entry.flags = 0;
4783 CAM_INVALIDATE(config->config_table[0]);
4784 config->config_table[0].target_table_entry.clients_bit_vector =
4785 cpu_to_le32(cl_bit_vec);
4786 config->config_table[0].target_table_entry.vlan_id = 0;
4788 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4789 (set ? "setting" : "clearing"),
4790 config->config_table[0].cam_entry.msb_mac_addr,
4791 config->config_table[0].cam_entry.middle_mac_addr,
4792 config->config_table[0].cam_entry.lsb_mac_addr);
/* optional broadcast CAM entry (all-ones address) */
4796 config->config_table[1].cam_entry.msb_mac_addr =
4797 cpu_to_le16(0xffff);
4798 config->config_table[1].cam_entry.middle_mac_addr =
4799 cpu_to_le16(0xffff);
4800 config->config_table[1].cam_entry.lsb_mac_addr =
4801 cpu_to_le16(0xffff);
4802 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4804 config->config_table[1].target_table_entry.flags =
4805 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4807 CAM_INVALIDATE(config->config_table[1]);
4808 config->config_table[1].target_table_entry.clients_bit_vector =
4809 cpu_to_le32(cl_bit_vec);
4810 config->config_table[1].target_table_entry.vlan_id = 0;
/* post the SET_MAC ramrod with the DMA address of the command */
4813 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4814 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4815 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4819 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4821 * @param bp driver descriptor
4822 * @param set set or clear an entry (1 or 0)
4823 * @param mac pointer to a buffer containing a MAC
4824 * @param cl_bit_vec bit vector of clients to register a MAC for
4825 * @param cam_offset offset in a CAM to use
/*
 * E1H variant of the CAM MAC configuration: single-entry command that
 * also carries the outer-VLAN (e1hov) id; posts a SET_MAC ramrod.
 * The set-vs-invalidate branch around line 4849/4851 is missing from
 * this listing.
 */
4827 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4828 u32 cl_bit_vec, u8 cam_offset)
4830 struct mac_configuration_cmd_e1h *config =
4831 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4833 config->hdr.length = 1;
4834 config->hdr.offset = cam_offset;
4835 config->hdr.client_id = 0xff;
4836 config->hdr.reserved1 = 0;
/* MAC is stored as three byte-swapped 16-bit words */
4839 config->config_table[0].msb_mac_addr =
4840 swab16(*(u16 *)&mac[0]);
4841 config->config_table[0].middle_mac_addr =
4842 swab16(*(u16 *)&mac[2]);
4843 config->config_table[0].lsb_mac_addr =
4844 swab16(*(u16 *)&mac[4]);
4845 config->config_table[0].clients_bit_vector =
4846 cpu_to_le32(cl_bit_vec);
4847 config->config_table[0].vlan_id = 0;
4848 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4850 config->config_table[0].flags = BP_PORT(bp);
4852 config->config_table[0].flags =
4853 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4855 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
4856 (set ? "setting" : "clearing"),
4857 config->config_table[0].msb_mac_addr,
4858 config->config_table[0].middle_mac_addr,
4859 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4861 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4862 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4863 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/*
 * bnx2x_wait_ramrod - wait (or poll) until *state_p reaches 'state'.
 *
 * When 'poll' is set, actively drives the rx completion processing via
 * bnx2x_rx_int() (default queue, plus queue 'idx' for non-zero idx,
 * since some ramrod completions arrive on non-default queues).  The
 * state itself is changed asynchronously by bnx2x_sp_event(), hence
 * the mb() before the check.  The surrounding loop, timeout counting
 * and return values are not fully visible in this listing.
 */
4866 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4867 int *state_p, int poll)
4869 /* can take a while if any port is running */
4872 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4873 poll ? "polling" : "waiting", state, idx);
4878 bnx2x_rx_int(bp->fp, 10);
4879 /* if index is different from 0
4880 * the reply for some commands will
4881 * be on the non default queue
4884 bnx2x_rx_int(&bp->fp[idx], 10);
4887 mb(); /* state is changed by bnx2x_sp_event() */
4888 if (*state_p == state) {
4889 #ifdef BNX2X_STOP_ON_ERROR
4890 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4902 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4903 poll ? "polling" : "waiting", state, idx);
4904 #ifdef BNX2X_STOP_ON_ERROR
/* Set (set != 0) or clear the device's Ethernet MAC in the E1H CAM at
 * offset BP_FUNC(bp), then wait for the SET_MAC ramrod to complete.
 * Clearing uses polling mode (poll = 1) since it runs during unload.
 */
4911 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4913 bp->set_mac_pending++;
4916 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4917 (1 << bp->fp->cl_id), BP_FUNC(bp));
4919 /* Wait for a completion */
4920 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
/* E1 variant of the above: CAM entry 0 for port 0, entry 32 for port 1.
 * Waits for the ramrod completion; polls when clearing (unload path).
 */
4923 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4925 bp->set_mac_pending++;
4928 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4929 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4932 /* Wait for a completion */
4933 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4938 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4939 * MAC(s). This function will wait until the ramrod completion
4942 * @param bp driver handle
4943 * @param set set or clear the CAM entry
4945 * @return 0 if success, -ENODEV if ramrod doesn't return.
4947 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
/* Only the iSCSI L2 client receives traffic for this MAC */
4949 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4951 bp->set_mac_pending++;
4954 /* Send a SET_MAC ramrod */
/* E1: iSCSI MAC goes two entries past the ETH MAC (0/32 + 2) */
4956 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4957 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4960 /* CAM allocation for E1H
4961 * unicasts: by func number
4962 * multicast: 20+FUNC*20, 20 each
4964 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4965 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4967 /* Wait for a completion when setting */
4968 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
/* Open the leading (default) connection: re-arm the IGU status block,
 * post the PORT_SETUP ramrod and wait for bp->state == BNX2X_STATE_OPEN.
 * Returns the bnx2x_wait_ramrod() result (0 on success).
 */
4974 int bnx2x_setup_leading(struct bnx2x *bp)
4978 /* reset IGU state */
4979 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4982 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4984 /* Wait for completion */
4985 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/* Open a non-default (multi) connection 'index': re-arm its IGU status
 * block, post CLIENT_SETUP and wait for the fastpath to reach OPEN.
 */
4990 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4992 struct bnx2x_fastpath *fp = &bp->fp[index];
4994 /* reset IGU state */
4995 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4998 fp->state = BNX2X_FP_STATE_OPENING;
4999 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
5002 /* Wait for completion */
5003 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
/* Choose bp->num_queues for MSI-X operation based on the RSS mode:
 * REGULAR caps the module-parameter queue count at BNX2X_MAX_QUEUES;
 * otherwise the online-CPU count is capped the same way.  (Case
 * bodies/breaks are partially elided in this excerpt.)
 */
5008 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5011 switch (bp->multi_mode) {
5012 case ETH_RSS_MODE_DISABLED:
5016 case ETH_RSS_MODE_REGULAR:
5018 bp->num_queues = min_t(u32, num_queues,
5019 BNX2X_MAX_QUEUES(bp));
5021 bp->num_queues = min_t(u32, num_online_cpus(),
5022 BNX2X_MAX_QUEUES(bp));
/* Close a non-default connection: HALT ramrod, wait for HALTED, then
 * delete the CFC entry and wait for CLOSED.  Returns non-zero on a
 * ramrod timeout.
 */
5034 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5036 struct bnx2x_fastpath *fp = &bp->fp[index];
5039 /* halt the connection */
5040 fp->state = BNX2X_FP_STATE_HALTING;
5041 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5043 /* Wait for completion */
5044 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5046 if (rc) /* timeout */
5049 /* delete cfc entry */
5050 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5052 /* Wait for completion */
5053 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
/* Close the leading connection: HALT ramrod on queue 0, then a
 * PORT_DELETE ramrod whose completion is detected by watching the
 * default status block producer (no state field is updated for it).
 */
5058 static int bnx2x_stop_leading(struct bnx2x *bp)
5060 __le16 dsb_sp_prod_idx;
5061 /* if the other port is handling traffic,
5062 this can take a lot of time */
5068 /* Send HALT ramrod */
5069 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5070 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5072 /* Wait for completion */
5073 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5074 &(bp->fp[0].state), 1);
5075 if (rc) /* timeout */
/* Snapshot the producer so we can detect the PORT_DEL completion */
5078 dsb_sp_prod_idx = *bp->dsb_sp_prod;
5080 /* Send PORT_DELETE ramrod */
5081 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5083 /* Wait for completion to arrive on default status block
5084 we are going to reset the chip anyway
5085 so there is not much to do if this times out
5087 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
/* NOTE(review): loop timeout/sleep lines are elided in this excerpt */
5089 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5090 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5091 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5092 #ifdef BNX2X_STOP_ON_ERROR
5100 rmb(); /* Refresh the dsb_sp_prod */
5102 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5103 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
/* Per-function reset: mask HC leading/trailing edge interrupts for this
 * port, stop the timer scan and wait for it to drain, then zero this
 * function's ILT entries.
 */
5108 static void bnx2x_reset_func(struct bnx2x *bp)
5110 int port = BP_PORT(bp);
5111 int func = BP_FUNC(bp);
5115 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5116 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5119 /* Disable Timer scan */
5120 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5122 * Wait for at least 10ms and up to 2 second for the timers scan to
5125 for (i = 0; i < 200; i++) {
5127 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
/* Clear this function's Internal Lookup Table range */
5132 base = FUNC_ILT_BASE(func);
5133 for (i = base; i < base + ILT_PER_FUNC; i++)
5134 bnx2x_ilt_wr(bp, i, 0);
/* Per-port reset: mask NIG interrupts, stop BRB reception for the port
 * (except MCP-directed frames), mask AEU attentions, and warn if the
 * BRB still holds packet blocks for this port.
 */
5137 static void bnx2x_reset_port(struct bnx2x *bp)
5139 int port = BP_PORT(bp);
5142 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5144 /* Do not rcv packets to BRB */
5145 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5146 /* Do not direct rcv packets that are not for MCP to the BRB */
5147 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5148 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5151 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5154 /* Check for BRB port occupancy */
5155 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5157 DP(NETIF_MSG_IFDOWN,
5158 "BRB1 is not empty %d blocks are occupied\n", val);
5160 /* TODO: Close Doorbell port? */
/* Dispatch the MCP unload response code to the matching reset scope:
 * COMMON resets port + function + common blocks, PORT resets port +
 * function, FUNCTION resets only the function.  (break statements are
 * elided in this excerpt.)
 */
5163 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5165 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5166 BP_FUNC(bp), reset_code);
5168 switch (reset_code) {
5169 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5170 bnx2x_reset_port(bp);
5171 bnx2x_reset_func(bp);
5172 bnx2x_reset_common(bp);
5175 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5176 bnx2x_reset_port(bp);
5177 bnx2x_reset_func(bp);
5180 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5181 bnx2x_reset_func(bp);
5185 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
/* Full unload sequence: drain TX queues, remove MAC/multicast CAM
 * entries (E1 vs E1H paths), clear the iSCSI MAC, negotiate the unload
 * scope with the MCP (or emulate it via load_count when there is no
 * MCP), close all connections, reset the link and the chip, and report
 * UNLOAD_DONE.  Several intermediate lines are elided in this excerpt.
 */
5190 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5192 int port = BP_PORT(bp);
5196 /* Wait until tx fastpath tasks complete */
5197 for_each_queue(bp, i) {
5198 struct bnx2x_fastpath *fp = &bp->fp[i];
5201 while (bnx2x_has_tx_work_unload(fp)) {
5205 BNX2X_ERR("timeout waiting for queue[%d]\n",
5207 #ifdef BNX2X_STOP_ON_ERROR
5218 /* Give HW time to discard old tx messages */
/* E1: clear the ETH MAC and invalidate the whole multicast table */
5221 if (CHIP_IS_E1(bp)) {
5222 struct mac_configuration_cmd *config =
5223 bnx2x_sp(bp, mcast_config);
5225 bnx2x_set_eth_mac_addr_e1(bp, 0);
5227 for (i = 0; i < config->hdr.length; i++)
5228 CAM_INVALIDATE(config->config_table[i]);
5230 config->hdr.length = i;
5231 if (CHIP_REV_IS_SLOW(bp))
5232 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5234 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5235 config->hdr.client_id = bp->fp->cl_id;
5236 config->hdr.reserved1 = 0;
5238 bp->set_mac_pending++;
5241 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5242 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5243 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
/* E1H: disable the LLH, clear the MAC and the MC hash registers */
5246 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5248 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5250 for (i = 0; i < MC_HASH_SIZE; i++)
5251 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5253 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5256 /* Clear iSCSI L2 MAC */
5257 mutex_lock(&bp->cnic_mutex);
5258 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5259 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5260 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5262 mutex_unlock(&bp->cnic_mutex);
/* Pick the unload request code according to the WoL configuration */
5265 if (unload_mode == UNLOAD_NORMAL)
5266 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5268 else if (bp->flags & NO_WOL_FLAG)
5269 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5272 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5273 u8 *mac_addr = bp->dev->dev_addr;
5275 /* The mac address is written to entries 1-4 to
5276 preserve entry 0 which is used by the PMF */
5277 u8 entry = (BP_E1HVN(bp) + 1)*8;
5279 val = (mac_addr[0] << 8) | mac_addr[1];
5280 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5282 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5283 (mac_addr[4] << 8) | mac_addr[5];
5284 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5286 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5289 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5291 /* Close multi and leading connections
5292 Completions for ramrods are collected in a synchronous way */
5293 for_each_nondefault_queue(bp, i)
5294 if (bnx2x_stop_multi(bp, i))
5297 rc = bnx2x_stop_leading(bp);
5299 BNX2X_ERR("Stop leading failed!\n");
5300 #ifdef BNX2X_STOP_ON_ERROR
/* With an MCP: ask the firmware for the actual unload scope */
5309 reset_code = bnx2x_fw_command(bp, reset_code);
/* Without an MCP: derive the scope from the shared load counters */
5311 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
5312 load_count[0], load_count[1], load_count[2]);
5314 load_count[1 + port]--;
5315 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
5316 load_count[0], load_count[1], load_count[2]);
5317 if (load_count[0] == 0)
5318 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5319 else if (load_count[1 + port] == 0)
5320 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5322 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5325 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5326 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5327 bnx2x__link_reset(bp);
5329 /* Reset the chip */
5330 bnx2x_reset_chip(bp, reset_code);
5332 /* Report UNLOAD_DONE to MCP */
5334 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
/* Disable the "close the gates" HW protection used by the parity
 * recovery flow.  E1 clears bits in the per-port AEU attention mask
 * (the bit manipulation between read and write is elided in this
 * excerpt); E1H clears the PXP/NIG close bits in the general AEU mask.
 */
5338 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5342 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5344 if (CHIP_IS_E1(bp)) {
5345 int port = BP_PORT(bp);
5346 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5347 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5349 val = REG_RD(bp, addr);
5351 REG_WR(bp, addr, val);
5352 } else if (CHIP_IS_E1H(bp)) {
5353 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5354 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5355 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5356 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5361 /* Close gates #2, #3 and #4: */
/* close == true shuts the gates (discard doorbells/internal writes and
 * clear the HC config enable bit); close == false reopens them.  Note
 * the inverted sense for gate #3: bit is SET when opening.
 */
5362 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5366 /* Gates #2 and #4a are closed/opened for "not E1" only */
5367 if (!CHIP_IS_E1(bp)) {
5369 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5370 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5371 close ? (val | 0x1) : (val & (~(u32)1)));
5373 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5374 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5375 close ? (val | 0x1) : (val & (~(u32)1)));
5379 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5380 val = REG_RD(bp, addr);
5381 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5383 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5384 close ? "closing" : "opening");
5388 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
/* Before an MCP reset: save the current CLP `magic' bit into *magic_val
 * and force it on in the shared MF config mailbox so the MF
 * configuration survives the reset.
 */
5390 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5392 /* Do some magic... */
5393 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5394 *magic_val = val & SHARED_MF_CLP_MAGIC;
5395 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5398 /* Restore the value of the `magic' bit.
5400 * @param pdev Device handle.
5401 * @param magic_val Old value of the `magic' bit.
5403 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5405 /* Restore the `magic' bit value... */
5406 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5407 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5408 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
/* Clear the magic bit and OR back the value saved by *_reset_prep() */
5409 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5410 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5411 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5414 /* Prepares for MCP reset: takes care of CLP configurations.
5417 * @param magic_val Old value of 'magic' bit.
5419 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5422 u32 validity_offset;
5424 DP(NETIF_MSG_HW, "Starting\n");
5426 /* Set `magic' bit in order to save MF config */
5427 if (!CHIP_IS_E1(bp))
5428 bnx2x_clp_reset_prep(bp, magic_val);
5430 /* Get shmem offset */
5431 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5432 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5434 /* Clear validity map flags */
/* Zeroing validity_map[0] marks shmem invalid until MCP re-signs it */
5436 REG_WR(bp, shmem + validity_offset, 0);
5439 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5440 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
5442 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5443 * depending on the HW type.
/* Emulation/FPGA (CHIP_REV_IS_SLOW) runs 10x slower, so wait 10x longer */
5447 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5449 /* special handling for emulation and FPGA,
5450 wait 10 times longer */
5451 if (CHIP_REV_IS_SLOW(bp))
5452 msleep(MCP_ONE_TIMEOUT*10);
5454 msleep(MCP_ONE_TIMEOUT);
/* Complete an MCP reset: poll the shmem validity map (up to MCP_TIMEOUT)
 * until both DEV_INFO and MB validity bits are set, proving the MCP is
 * back up, then restore the CLP `magic' bit saved before the reset.
 * Returns non-zero on failure (error-return lines are elided here).
 */
5457 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5459 u32 shmem, cnt, validity_offset, val;
5464 /* Get shmem offset */
5465 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5467 BNX2X_ERR("Shmem 0 return failure\n");
5472 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5474 /* Wait for MCP to come up */
5475 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5476 /* TBD: its best to check validity map of last port.
5477 * currently checks on port 0.
5479 val = REG_RD(bp, shmem + validity_offset);
5480 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5481 shmem + validity_offset, val);
5483 /* check that shared memory is valid. */
5484 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5485 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5488 bnx2x_mcp_wait_one(bp);
5491 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5493 /* Check that shared memory is valid. This indicates that MCP is up. */
5494 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5495 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5496 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5502 /* Restore the `magic' bit value */
5503 if (!CHIP_IS_E1(bp))
5504 bnx2x_clp_reset_done(bp, magic_val);
/* Prepare the PXP2 block for "process kill" on non-E1 chips by clearing
 * the read-start-init and RQ done flags; E1 needs no preparation.
 */
5509 static void bnx2x_pxp_prep(struct bnx2x *bp)
5511 if (!CHIP_IS_E1(bp)) {
5512 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5513 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5514 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5520 * Reset the whole chip except for:
5522 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5525 * - MISC (including AEU)
/* Assert reset on (almost) every block via the two RESET_REG _CLEAR
 * registers, then immediately de-assert via the _SET registers.  The
 * not_reset masks carve out the blocks that must survive the reset.
 */
5529 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5531 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5534 MISC_REGISTERS_RESET_REG_1_RST_HC |
5535 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5536 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5539 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5540 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5541 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5542 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5543 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5544 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5545 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5546 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5548 reset_mask1 = 0xffffffff;
/* Register 2 is 16 bits wide on E1, 17 on later chips (condition
 * line is elided in this excerpt) */
5551 reset_mask2 = 0xffff;
5553 reset_mask2 = 0x1ffff;
5555 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5556 reset_mask1 & (~not_reset_mask1));
5557 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5558 reset_mask2 & (~not_reset_mask2));
5563 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5564 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
/* "Process kill" recovery: wait for the PXP2 Tetris buffer and read
 * pipeline to drain, close gates #2-#4, prepare MCP/PXP, reset the
 * chip, wait for the MCP to come back, and reopen the gates.  Returns
 * non-zero if the drain or MCP recovery fails (error-return lines are
 * elided in this excerpt).
 */
5568 static int bnx2x_process_kill(struct bnx2x *bp)
5572 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5575 /* Empty the Tetris buffer, wait for 1s */
5577 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5578 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5579 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5580 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5581 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
/* All-free counters at their reset values + both ports idle == drained */
5582 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5583 ((port_is_idle_0 & 0x1) == 0x1) &&
5584 ((port_is_idle_1 & 0x1) == 0x1) &&
5585 (pgl_exp_rom2 == 0xffffffff))
5588 } while (cnt-- > 0);
5591 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5593 " outstanding read requests after 1s!\n");
5594 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5595 " port_is_idle_0=0x%08x,"
5596 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5597 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5604 /* Close gates #2, #3 and #4 */
5605 bnx2x_set_234_gates(bp, true);
5607 /* TBD: Indicate that "process kill" is in progress to MCP */
5609 /* Clear "unprepared" bit */
5610 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5613 /* Make sure all is written to the chip before the reset */
5616 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5617 * PSWHST, GRC and PSWRD Tetris buffer.
5621 /* Prepare to chip reset: */
5623 bnx2x_reset_mcp_prep(bp, &val);
5629 /* reset the chip */
5630 bnx2x_process_kill_chip_reset(bp);
5633 /* Recover after reset: */
5635 if (bnx2x_reset_mcp_comp(bp, val))
5641 /* Open the gates #2, #3 and #4 */
5642 bnx2x_set_234_gates(bp, false);
5644 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5645 * reset state, re-enable attentions. */
/* Recovery-leader path: run "process kill"; on success clear the global
 * reset-in-progress flag and mark recovery done.  The LEADER_LOCK HW
 * lock (RESERVED_08) is released on both success and failure paths.
 */
5650 static int bnx2x_leader_reset(struct bnx2x *bp)
5653 /* Try to recover after the failure */
5654 if (bnx2x_process_kill(bp)) {
5655 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
5658 goto exit_leader_reset;
5661 /* Clear "reset is in progress" bit and update the driver state */
5662 bnx2x_set_reset_done(bp);
5663 bp->recovery_state = BNX2X_RECOVERY_DONE;
5667 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5672 /* Assumption: runs under rtnl lock. This together with the fact
5673 * that it's called only from bnx2x_reset_task() ensure that it
5674 * will never be called when netif_running(bp->dev) is false.
/* Parity-error recovery state machine.  INIT: try to become leader
 * (LEADER_LOCK) and unload the NIC.  WAIT: the leader waits for all
 * functions to unload and then performs the chip reset + reload;
 * non-leaders either try to take over leadership or reload once the
 * leader finishes.  Several lines are elided in this excerpt.
 */
5676 static void bnx2x_parity_recover(struct bnx2x *bp)
5678 DP(NETIF_MSG_HW, "Handling parity\n");
5680 switch (bp->recovery_state) {
5681 case BNX2X_RECOVERY_INIT:
5682 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5683 /* Try to get a LEADER_LOCK HW lock */
5684 if (bnx2x_trylock_hw_lock(bp,
5685 HW_LOCK_RESOURCE_RESERVED_08))
5688 /* Stop the driver */
5689 /* If interface has been removed - break */
5690 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5693 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5694 /* Ensure "is_leader" and "recovery_state"
5695 * update values are seen on other CPUs
5700 case BNX2X_RECOVERY_WAIT:
5701 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5702 if (bp->is_leader) {
5703 u32 load_counter = bnx2x_get_load_cnt(bp);
5705 /* Wait until all other functions get
5708 schedule_delayed_work(&bp->reset_task,
5712 /* If all other functions got down -
5713 * try to bring the chip back to
5714 * normal. In any case it's an exit
5715 * point for a leader.
5717 if (bnx2x_leader_reset(bp) ||
5718 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5719 printk(KERN_ERR"%s: Recovery "
5720 "has failed. Power cycle is "
5721 "needed.\n", bp->dev->name);
5722 /* Disconnect this device */
5723 netif_device_detach(bp->dev);
5724 /* Block ifup for all function
5725 * of this ASIC until
5726 * "process kill" or power
5729 bnx2x_set_reset_in_progress(bp);
5730 /* Shut down the power */
5731 bnx2x_set_power_state(bp,
5738 } else { /* non-leader */
5739 if (!bnx2x_reset_is_done(bp)) {
5740 /* Try to get a LEADER_LOCK HW lock as
5741 * long as a former leader may have
5742 * been unloaded by the user or
5743 * released a leadership by another
5746 if (bnx2x_trylock_hw_lock(bp,
5747 HW_LOCK_RESOURCE_RESERVED_08)) {
5748 /* I'm a leader now! Restart a
5755 schedule_delayed_work(&bp->reset_task,
5759 } else { /* A leader has completed
5760 * the "process kill". It's an exit
5761 * point for a non-leader.
5763 bnx2x_nic_load(bp, LOAD_NORMAL);
5764 bp->recovery_state =
5765 BNX2X_RECOVERY_DONE;
5776 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5777 * scheduled on a general queue in order to prevent a dead lock.
/* Delayed-work handler: if a parity recovery is pending, run the
 * recovery state machine; otherwise do a plain unload + reload.  With
 * BNX2X_STOP_ON_ERROR the reset is skipped to preserve debug state.
 */
5779 static void bnx2x_reset_task(struct work_struct *work)
5781 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5783 #ifdef BNX2X_STOP_ON_ERROR
5784 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5785 " so reset not done to allow debug dump,\n"
5786 KERN_ERR " you will need to reboot when done\n");
5792 if (!netif_running(bp->dev))
5793 goto reset_task_exit;
5795 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5796 bnx2x_parity_recover(bp);
5798 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5799 bnx2x_nic_load(bp, LOAD_NORMAL);
5806 /* end of nic load/unload */
5809 * Init service functions
/* Map a function index (0-7) to its PGL "pretend" register address;
 * logs an error for out-of-range indices (the switch header and the
 * fallback return are elided in this excerpt).
 */
5812 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5815 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5816 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5817 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5818 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5819 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5820 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5821 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5822 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5824 BNX2X_ERR("Unsupported function index: %d\n", func);
/* E1H: temporarily "pretend" to be function 0 via the PGL pretend
 * register, disable interrupts in that mode, then restore the original
 * function.  Reading the register back flushes the GRC transaction and
 * verifies the write took effect.
 */
5829 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5831 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5833 /* Flush all outstanding writes */
5836 /* Pretend to be function 0 */
5838 /* Flush the GRC transaction (in the chip) */
5839 new_val = REG_RD(bp, reg);
5841 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5846 /* From now we are in the "like-E1" mode */
5847 bnx2x_int_disable(bp);
5849 /* Flush all outstanding writes */
5852 /* Restore the original function settings */
5853 REG_WR(bp, reg, orig_func);
5854 new_val = REG_RD(bp, reg);
5855 if (new_val != orig_func) {
5856 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5857 orig_func, new_val);
/* Disable interrupts, using the function-pretend dance on E1H and the
 * plain path otherwise. */
5862 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5864 if (CHIP_IS_E1H(bp))
5865 bnx2x_undi_int_disable_e1h(bp, func);
5867 bnx2x_int_disable(bp);
/* Probe-time check for a resident UNDI (pre-boot) driver, detected by
 * DORQ_REG_NORM_CID_OFST == 0x7.  If found: unload it via MCP on both
 * ports, disable interrupts, stop BRB input, reset the BRB/NIG while
 * preserving the NIG port-swap straps, and restore our fw sequence
 * number.  Several lines are elided in this excerpt.
 */
5870 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5874 /* Check if there is any driver already loaded */
5875 val = REG_RD(bp, MISC_REG_UNPREPARED);
5877 /* Check if it is the UNDI driver
5878 * UNDI driver initializes CID offset for normal bell to 0x7
5880 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5881 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5883 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5885 int func = BP_FUNC(bp);
5889 /* clear the UNDI indication */
5890 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5892 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5894 /* try unload UNDI on port 0 */
5897 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5898 DRV_MSG_SEQ_NUMBER_MASK);
5899 reset_code = bnx2x_fw_command(bp, reset_code);
5901 /* if UNDI is loaded on the other port */
5902 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5904 /* send "DONE" for previous unload */
5905 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5907 /* unload UNDI on port 1 */
5910 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5911 DRV_MSG_SEQ_NUMBER_MASK);
5912 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5914 bnx2x_fw_command(bp, reset_code);
5917 /* now it's safe to release the lock */
5918 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5920 bnx2x_undi_int_disable(bp, func);
5922 /* close input traffic and wait for it */
5923 /* Do not rcv packets to BRB */
5925 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5926 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5927 /* Do not direct rcv packets that are not for MCP to
5930 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5931 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5934 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5935 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5938 /* save NIG port swap info */
5939 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5940 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5943 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5946 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5948 /* take the NIG out of reset and restore swap values */
5950 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5951 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5952 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5953 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5955 /* send unload done to the MCP */
5956 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5958 /* restore our func and fw_seq */
5961 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5962 DRV_MSG_SEQ_NUMBER_MASK);
5965 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
/* Probe-time discovery of chip-wide HW info: chip id (num/rev/metal/
 * bond), single-port strapping, flash size, shmem bases, MCP validity,
 * hw/feature config, bootcode version, WoL capability and the board
 * part number.  Some lines (e.g. the 'return' after NO_MCP_FLAG) are
 * elided in this excerpt.
 */
5969 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5971 u32 val, val2, val3, val4, id;
5974 /* Get the chip revision id and number. */
5975 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5976 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5977 id = ((val & 0xffff) << 16);
5978 val = REG_RD(bp, MISC_REG_CHIP_REV);
5979 id |= ((val & 0xf) << 12);
5980 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5981 id |= ((val & 0xff) << 4);
5982 val = REG_RD(bp, MISC_REG_BOND_ID);
5984 bp->common.chip_id = id;
5985 bp->link_params.chip_id = bp->common.chip_id;
5986 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
/* Detect single-port boards from strap register 0x2874 */
5988 val = (REG_RD(bp, 0x2874) & 0x55);
5989 if ((bp->common.chip_id & 0x1) ||
5990 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5991 bp->flags |= ONE_PORT_FLAG;
5992 BNX2X_DEV_INFO("single port device\n");
5995 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5996 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5997 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5998 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5999 bp->common.flash_size, bp->common.flash_size);
6001 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6002 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
6003 bp->link_params.shmem_base = bp->common.shmem_base;
6004 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6005 bp->common.shmem_base, bp->common.shmem2_base);
/* A shmem base outside [0xA0000, 0xC0000) means no running MCP */
6007 if (!bp->common.shmem_base ||
6008 (bp->common.shmem_base < 0xA0000) ||
6009 (bp->common.shmem_base >= 0xC0000)) {
6010 BNX2X_DEV_INFO("MCP not active\n");
6011 bp->flags |= NO_MCP_FLAG;
6015 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6016 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6017 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6018 BNX2X_ERROR("BAD MCP validity signature\n");
6020 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6021 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6023 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6024 SHARED_HW_CFG_LED_MODE_MASK) >>
6025 SHARED_HW_CFG_LED_MODE_SHIFT);
6027 bp->link_params.feature_config_flags = 0;
6028 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6029 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6030 bp->link_params.feature_config_flags |=
6031 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6033 bp->link_params.feature_config_flags &=
6034 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6036 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6037 bp->common.bc_ver = val;
6038 BNX2X_DEV_INFO("bc_ver %X\n", val);
6039 if (val < BNX2X_BC_VER) {
6040 /* for now only warn
6041 * later we might need to enforce this */
6042 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6043 "please upgrade BC\n", BNX2X_BC_VER, val);
6045 bp->link_params.feature_config_flags |=
6046 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6047 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
/* WoL is only available to E1H virtual-network function 0 */
6049 if (BP_E1HVN(bp) == 0) {
6050 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6051 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6053 /* no WOL capability for E1HVN != 0 */
6054 bp->flags |= NO_WOL_FLAG;
6056 BNX2X_DEV_INFO("%sWoL capable\n",
6057 (bp->flags & NO_WOL_FLAG) ? "not " : "");
6059 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6060 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6061 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6062 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6064 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6065 val, val2, val3, val4);
/* Build bp->port.supported from the PHY capabilities (internal PHY when
 * one PHY is configured, external PHY1 otherwise), read the PHY address
 * matching switch_cfg (1G SerDes vs 10G XGXS), then mask out any speeds
 * the NVRAM speed_cap_mask does not allow.
 */
6068 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6071 int port = BP_PORT(bp);
6072 bp->port.supported = 0;
6073 switch (bp->link_params.num_phys) {
6075 bp->port.supported = bp->link_params.phy[INT_PHY].supported;
6078 bp->port.supported = bp->link_params.phy[EXT_PHY1].supported;
6082 if (!(bp->port.supported)) {
6083 BNX2X_ERR("NVRAM config error. BAD phy config."
6084 "PHY1 config 0x%x\n",
6086 dev_info.port_hw_config[port].external_phy_config));
6090 switch (switch_cfg) {
6092 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6094 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6097 case SWITCH_CFG_10G:
6098 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6100 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6105 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6106 bp->port.link_config);
6109 /* mask what we support according to speed_cap_mask */
6110 if (!(bp->link_params.speed_cap_mask &
6111 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6112 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6114 if (!(bp->link_params.speed_cap_mask &
6115 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6116 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6118 if (!(bp->link_params.speed_cap_mask &
6119 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6120 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6122 if (!(bp->link_params.speed_cap_mask &
6123 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6124 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6126 if (!(bp->link_params.speed_cap_mask &
6127 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6128 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6129 SUPPORTED_1000baseT_Full);
6131 if (!(bp->link_params.speed_cap_mask &
6132 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6133 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6135 if (!(bp->link_params.speed_cap_mask &
6136 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6137 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6139 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
/* Translate the NVRAM-provided link_config into the requested link
 * parameters (req_line_speed, req_duplex, advertising).  A fixed speed
 * is only accepted when the PHY reported it in bp->port.supported;
 * otherwise an NVRAM config error is logged and (in the default case)
 * the driver falls back to autonegotiation.
 * NOTE(review): this view is elided — else branches / break statements
 * between the visible lines are not all shown here.
 */
6142 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
/* Default to full duplex; the *_HALF cases below override this. */
6144 bp->link_params.req_duplex = DUPLEX_FULL;
6146 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6147 case PORT_FEATURE_LINK_SPEED_AUTO:
6148 if (bp->port.supported & SUPPORTED_Autoneg) {
6149 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
/* Advertise everything the PHY supports when autonegotiating. */
6150 bp->port.advertising = bp->port.supported;
6152 /* force 10G, no AN */
6153 bp->link_params.req_line_speed = SPEED_10000;
6154 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6159 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6160 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6161 bp->link_params.req_line_speed = SPEED_10;
6162 bp->port.advertising = (ADVERTISED_10baseT_Full |
/* Requested speed not supported by the PHY: NVRAM is inconsistent. */
6165 BNX2X_ERROR("NVRAM config error. "
6166 "Invalid link_config 0x%x"
6167 " speed_cap_mask 0x%x\n",
6168 bp->port.link_config,
6169 bp->link_params.speed_cap_mask);
6174 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6175 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6176 bp->link_params.req_line_speed = SPEED_10;
6177 bp->link_params.req_duplex = DUPLEX_HALF;
6178 bp->port.advertising = (ADVERTISED_10baseT_Half |
6181 BNX2X_ERROR("NVRAM config error. "
6182 "Invalid link_config 0x%x"
6183 " speed_cap_mask 0x%x\n",
6184 bp->port.link_config,
6185 bp->link_params.speed_cap_mask);
6190 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6191 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6192 bp->link_params.req_line_speed = SPEED_100;
6193 bp->port.advertising = (ADVERTISED_100baseT_Full |
6196 BNX2X_ERROR("NVRAM config error. "
6197 "Invalid link_config 0x%x"
6198 " speed_cap_mask 0x%x\n",
6199 bp->port.link_config,
6200 bp->link_params.speed_cap_mask);
6205 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6206 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6207 bp->link_params.req_line_speed = SPEED_100;
6208 bp->link_params.req_duplex = DUPLEX_HALF;
6209 bp->port.advertising = (ADVERTISED_100baseT_Half |
6212 BNX2X_ERROR("NVRAM config error. "
6213 "Invalid link_config 0x%x"
6214 " speed_cap_mask 0x%x\n",
6215 bp->port.link_config,
6216 bp->link_params.speed_cap_mask);
6221 case PORT_FEATURE_LINK_SPEED_1G:
6222 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6223 bp->link_params.req_line_speed = SPEED_1000;
6224 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6227 BNX2X_ERROR("NVRAM config error. "
6228 "Invalid link_config 0x%x"
6229 " speed_cap_mask 0x%x\n",
6230 bp->port.link_config,
6231 bp->link_params.speed_cap_mask);
6236 case PORT_FEATURE_LINK_SPEED_2_5G:
6237 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6238 bp->link_params.req_line_speed = SPEED_2500;
6239 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6242 BNX2X_ERROR("NVRAM config error. "
6243 "Invalid link_config 0x%x"
6244 " speed_cap_mask 0x%x\n",
6245 bp->port.link_config,
6246 bp->link_params.speed_cap_mask);
/* All three 10G media variants map to the same requested speed. */
6251 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6252 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6253 case PORT_FEATURE_LINK_SPEED_10G_KR:
6254 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6255 bp->link_params.req_line_speed = SPEED_10000;
6256 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6259 BNX2X_ERROR("NVRAM config error. "
6260 "Invalid link_config 0x%x"
6261 " speed_cap_mask 0x%x\n",
6262 bp->port.link_config,
6263 bp->link_params.speed_cap_mask);
/* Unrecognized speed selector: log and fall back to autoneg. */
6269 BNX2X_ERROR("NVRAM config error. "
6270 "BAD link speed link_config 0x%x\n",
6271 bp->port.link_config);
6272 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6273 bp->port.advertising = bp->port.supported;
/* Flow-control request also comes from link_config; AUTO requires
 * autoneg support, otherwise force it off. */
6277 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6278 PORT_FEATURE_FLOW_CONTROL_MASK);
6279 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6280 !(bp->port.supported & SUPPORTED_Autoneg))
6281 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6283 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
6284 " advertising 0x%x\n",
6285 bp->link_params.req_line_speed,
6286 bp->link_params.req_duplex,
6287 bp->link_params.req_flow_ctrl, bp->port.advertising);
/* Compose a 6-byte MAC address in mac_buf from the shmem-style split
 * representation: the upper 16 bits (mac_hi) followed by the lower
 * 32 bits (mac_lo), both stored big-endian (network byte order).
 */
6290 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6292 mac_hi = cpu_to_be16(mac_hi);
6293 mac_lo = cpu_to_be32(mac_lo);
/* Bytes 0-1: high part; bytes 2-5: low part. */
6294 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6295 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
/* Read per-port hardware configuration from shared memory (shmem):
 * lane config, speed capability mask, link config, WoL default, the
 * external PHY address for the MDIO interface, and the port MAC /
 * iSCSI MAC addresses.
 */
6298 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6300 int port = BP_PORT(bp);
/* Fix: dropped stray second semicolon after the declaration. */
6303 u32 ext_phy_type, ext_phy_config;
6305 bp->link_params.bp = bp;
6306 bp->link_params.port = port;
6308 bp->link_params.lane_config =
6309 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6311 bp->link_params.speed_cap_mask =
6313 dev_info.port_hw_config[port].speed_capability_mask);
6315 bp->port.link_config =
6316 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6319 /* If the device is capable of WoL, set the default state according
6322 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6323 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6324 (config & PORT_FEATURE_WOL_ENABLED));
6326 BNX2X_DEV_INFO("lane_config 0x%08x"
6327 " speed_cap_mask 0x%08x link_config 0x%08x\n",
6328 bp->link_params.lane_config,
6329 bp->link_params.speed_cap_mask, bp->port.link_config);
6331 bp->link_params.switch_cfg |= (bp->port.link_config &
6332 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6333 bnx2x_phy_probe(&bp->link_params);
6334 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6336 bnx2x_link_settings_requested(bp);
6339 * If connected directly, work with the internal PHY, otherwise, work
6340 * with the external PHY
6344 dev_info.port_hw_config[port].external_phy_config);
6345 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6346 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6347 bp->mdio.prtad = bp->port.phy_addr;
/* Failed/not-connected external PHYs get no MDIO address. */
6349 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6350 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6352 XGXS_EXT_PHY_ADDR(ext_phy_config);
/* Primary MAC address: shmem upper/lower halves -> dev_addr,
 * mirrored into link_params and perm_addr. */
6354 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6355 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6356 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6357 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6358 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/* Separate MAC used for iSCSI offload. */
6361 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6362 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6363 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
/* Gather function-level hardware info: common chip info, E1H
 * multi-function (MF) configuration and outer-VLAN (E1HOV) tag,
 * port info, firmware mailbox sequence, and the per-function MAC
 * override from the MF config.  Falls back to a random MAC when no
 * valid address is available (emulation/FPGA only).
 */
6367 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6369 int func = BP_FUNC(bp);
6373 bnx2x_get_common_hwinfo(bp);
/* MF mode detection is only meaningful on E1H with an active MCP. */
6377 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6379 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
/* Function 0's e1hov tag decides single vs. multi function mode. */
6381 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6382 FUNC_MF_CFG_E1HOV_TAG_MASK);
6383 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6385 BNX2X_DEV_INFO("%s function mode\n",
6386 IS_E1HMF(bp) ? "multi" : "single");
6389 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6391 FUNC_MF_CFG_E1HOV_TAG_MASK);
6392 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6394 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6396 func, bp->e1hov, bp->e1hov);
/* In MF mode every function must carry a valid outer VLAN tag. */
6398 BNX2X_ERROR("No valid E1HOV for func %d,"
6399 " aborting\n", func);
6404 BNX2X_ERROR("VN %d in single function mode,"
6405 " aborting\n", BP_E1HVN(bp));
6411 if (!BP_NOMCP(bp)) {
6412 bnx2x_get_port_hwinfo(bp);
/* Seed driver<->MCP mailbox sequence from the current header. */
6414 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6415 DRV_MSG_SEQ_NUMBER_MASK);
6416 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
/* Per-function MAC override from the MF config (if not default). */
6420 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6421 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6422 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6423 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6424 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6425 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6426 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6427 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6428 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6429 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6430 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6432 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6440 /* only supposed to happen on emulation/FPGA */
6441 BNX2X_ERROR("warning: random MAC workaround active\n");
6442 random_ether_addr(bp->dev->dev_addr);
6443 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/* Read the PCI Vital Product Data (VPD) and, on Dell-branded boards
 * (matched by the MFR_ID keyword), copy the vendor-specific VENDOR0
 * string into bp->fw_ver.  All failures are silent: bp->fw_ver simply
 * stays zeroed.
 */
6449 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6451 int cnt, i, block_end, rodi;
6452 char vpd_data[BNX2X_VPD_LEN+1];
6453 char str_id_reg[VENDOR_ID_LEN+1];
6454 char str_id_cap[VENDOR_ID_LEN+1];
6457 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6458 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
/* Bail out unless a full VPD block was read. */
6460 if (cnt < BNX2X_VPD_LEN)
6463 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6464 PCI_VPD_LRDT_RO_DATA)
6469 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6470 pci_vpd_lrdt_size(&vpd_data[i]);
6472 i += PCI_VPD_LRDT_TAG_SIZE;
/* The RO block must lie entirely within the data we read. */
6474 if (block_end > BNX2X_VPD_LEN)
6477 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6478 PCI_VPD_RO_KEYWORD_MFR_ID);
6482 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6484 if (len != VENDOR_ID_LEN)
6487 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6489 /* vendor specific info */
/* Accept the Dell vendor ID in either hex case. */
6490 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6491 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6492 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6493 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6495 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6496 PCI_VPD_RO_KEYWORD_VENDOR0);
6498 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6500 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
/* NOTE(review): bound check keeps the copy within vpd_data and
 * within bp->fw_ver; confirm fw_ver is at least 32+2 bytes. */
6502 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6503 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6504 bp->fw_ver[len] = ' ';
/* One-time software initialization of the bnx2x instance: locks,
 * deferred work, HW info discovery, module-parameter driven settings
 * (multi-queue mode, TPA/LRO, dropless flow control), default ring
 * sizes, coalescing ticks, and the periodic timer.  Returns the
 * bnx2x_get_hwinfo() result (elided in this view).
 */
6513 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6515 int func = BP_FUNC(bp);
6519 /* Disable interrupt handling until HW is initialized */
6520 atomic_set(&bp->intr_sem, 1);
6521 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6523 mutex_init(&bp->port.phy_mutex);
6524 mutex_init(&bp->fw_mb_mutex);
6525 spin_lock_init(&bp->stats_lock);
6527 mutex_init(&bp->cnic_mutex);
6530 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6531 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6533 rc = bnx2x_get_hwinfo(bp);
6535 bnx2x_read_fwinfo(bp);
6536 /* need to reset chip if undi was active */
6538 bnx2x_undi_unload(bp);
6540 if (CHIP_REV_IS_FPGA(bp))
6541 dev_err(&bp->pdev->dev, "FPGA detected\n");
6543 if (BP_NOMCP(bp) && (func == 0))
6544 dev_err(&bp->pdev->dev, "MCP disabled, "
6545 "must load devices in order!\n");
6547 /* Set multi queue mode */
/* RSS multi-queue requires MSI-X; downgrade to disabled otherwise. */
6548 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6549 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6550 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6551 "requested is not MSI-X\n");
6552 multi_mode = ETH_RSS_MODE_DISABLED;
6554 bp->multi_mode = multi_mode;
6555 bp->int_mode = int_mode;
6557 bp->dev->features |= NETIF_F_GRO;
/* TPA (HW LRO aggregation) on/off per the disable_tpa module param. */
6561 bp->flags &= ~TPA_ENABLE_FLAG;
6562 bp->dev->features &= ~NETIF_F_LRO;
6564 bp->flags |= TPA_ENABLE_FLAG;
6565 bp->dev->features |= NETIF_F_LRO;
6567 bp->disable_tpa = disable_tpa;
6570 bp->dropless_fc = 0;
6572 bp->dropless_fc = dropless_fc;
6576 bp->tx_ring_size = MAX_TX_AVAIL;
6577 bp->rx_ring_size = MAX_RX_AVAIL;
6581 /* make sure that the numbers are in the right granularity */
/* Round coalescing ticks down to a multiple of 4*BNX2X_BTR. */
6582 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6583 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
/* Slow (emulation/FPGA) chips get a 5s period; real silicon 1s. */
6585 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6586 bp->current_interval = (poll ? poll : timer_interval);
6588 init_timer(&bp->timer);
6589 bp->timer.expires = jiffies + bp->current_interval;
6590 bp->timer.data = (unsigned long) bp;
6591 bp->timer.function = bnx2x_timer;
6597 /****************************************************************************
6598 * General service functions
6599 ****************************************************************************/
/* called with rtnl_lock */
/* ndo_open: power the device up and load the NIC.  If a parity-error
 * recovery is still pending, attempt to complete it here (leader
 * reset) before loading; refuse to open while recovery is unresolved.
 */
6602 static int bnx2x_open(struct net_device *dev)
6604 struct bnx2x *bp = netdev_priv(dev);
6606 netif_carrier_off(dev);
6608 bnx2x_set_power_state(bp, PCI_D0);
6610 if (!bnx2x_reset_is_done(bp)) {
6612 /* Reset MCP mail box sequence if there is an ongoing
6617 /* If it's the first function to load and reset done
6618 * is still not cleared it may mean a pending recovery.
6619 * We don't check the attention state here because it
6620 * may have already been cleared by a "common" reset,
6621 * but we shall proceed with "process kill" anyway.
6623 if ((bnx2x_get_load_cnt(bp) == 0) &&
6624 bnx2x_trylock_hw_lock(bp,
6625 HW_LOCK_RESOURCE_RESERVED_08) &&
6626 (!bnx2x_leader_reset(bp))) {
6627 DP(NETIF_MSG_HW, "Recovered in open\n");
/* Recovery could not complete: power back down and tell the user. */
6631 bnx2x_set_power_state(bp, PCI_D3hot);
6633 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6634 " completed yet. Try again later. If u still see this"
6635 " message after a few retries then power cycle is"
6636 " required.\n", bp->dev->name);
6642 bp->recovery_state = BNX2X_RECOVERY_DONE;
6644 return bnx2x_nic_load(bp, LOAD_OPEN);
6647 /* called with rtnl_lock */
/* ndo_stop: unload the NIC and drop to low-power state. */
6648 static int bnx2x_close(struct net_device *dev)
6650 struct bnx2x *bp = netdev_priv(dev);
6652 /* Unload the driver, release IRQs */
6653 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6654 bnx2x_set_power_state(bp, PCI_D3hot);
6659 /* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode from dev->flags and the multicast
 * list: promiscuous, all-multi, or a specific multicast set.  On E1
 * the multicast set is written into the CAM via a SET_MAC ramrod; on
 * later chips it is hashed (CRC32c) into the MC_HASH registers.
 */
6660 void bnx2x_set_rx_mode(struct net_device *dev)
6662 struct bnx2x *bp = netdev_priv(dev);
6663 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6664 int port = BP_PORT(bp);
/* Filters can only be programmed while the device is open. */
6666 if (bp->state != BNX2X_STATE_OPEN) {
6667 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6671 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6673 if (dev->flags & IFF_PROMISC)
6674 rx_mode = BNX2X_RX_MODE_PROMISC;
/* Too many multicast entries also forces all-multi. */
6676 else if ((dev->flags & IFF_ALLMULTI) ||
6677 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6679 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6681 else { /* some multicasts */
6682 if (CHIP_IS_E1(bp)) {
6684 struct netdev_hw_addr *ha;
6685 struct mac_configuration_cmd *config =
6686 bnx2x_sp(bp, mcast_config);
/* Fill one CAM entry per multicast address (16-bit swabbed). */
6689 netdev_for_each_mc_addr(ha, dev) {
6690 config->config_table[i].
6691 cam_entry.msb_mac_addr =
6692 swab16(*(u16 *)&ha->addr[0]);
6693 config->config_table[i].
6694 cam_entry.middle_mac_addr =
6695 swab16(*(u16 *)&ha->addr[2]);
6696 config->config_table[i].
6697 cam_entry.lsb_mac_addr =
6698 swab16(*(u16 *)&ha->addr[4]);
6699 config->config_table[i].cam_entry.flags =
6701 config->config_table[i].
6702 target_table_entry.flags = 0;
6703 config->config_table[i].target_table_entry.
6704 clients_bit_vector =
6705 cpu_to_le32(1 << BP_L_ID(bp));
6706 config->config_table[i].
6707 target_table_entry.vlan_id = 0;
6710 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6711 config->config_table[i].
6712 cam_entry.msb_mac_addr,
6713 config->config_table[i].
6714 cam_entry.middle_mac_addr,
6715 config->config_table[i].
6716 cam_entry.lsb_mac_addr);
/* Invalidate any leftover entries from the previous, longer list. */
6719 old = config->hdr.length;
6721 for (; i < old; i++) {
6722 if (CAM_IS_INVALID(config->
6724 /* already invalidated */
6728 CAM_INVALIDATE(config->
6733 if (CHIP_REV_IS_SLOW(bp))
6734 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6736 offset = BNX2X_MAX_MULTICAST*(1 + port);
6738 config->hdr.length = i;
6739 config->hdr.offset = offset;
6740 config->hdr.client_id = bp->fp->cl_id;
6741 config->hdr.reserved1 = 0;
6743 bp->set_mac_pending++;
/* Post the SET_MAC ramrod carrying the CAM table. */
6746 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6747 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6748 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6751 /* Accept one or more multicasts */
6752 struct netdev_hw_addr *ha;
6753 u32 mc_filter[MC_HASH_SIZE];
6754 u32 crc, bit, regidx;
6757 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6759 netdev_for_each_mc_addr(ha, dev) {
6760 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
/* Top 8 bits of the CRC32c select the filter bit. */
6763 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6764 bit = (crc >> 24) & 0xff;
6767 mc_filter[regidx] |= (1 << bit);
6770 for (i = 0; i < MC_HASH_SIZE; i++)
6771 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6776 bp->rx_mode = rx_mode;
6777 bnx2x_set_storm_rx_mode(bp);
6781 /* called with rtnl_lock */
/* MDIO read callback for the mdio45 infrastructure: reads one PHY
 * register under the PHY lock.  Returns the value (or negative errno
 * — return path elided in this view).
 */
6782 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6783 int devad, u16 addr)
6785 struct bnx2x *bp = netdev_priv(netdev);
6789 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6790 prtad, devad, addr);
6792 /* The HW expects different devad if CL22 is used */
6793 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6795 bnx2x_acquire_phy_lock(bp);
6796 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
6797 bnx2x_release_phy_lock(bp);
6798 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6805 /* called with rtnl_lock */
/* MDIO write callback: writes one PHY register under the PHY lock. */
6806 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6807 u16 addr, u16 value)
6809 struct bnx2x *bp = netdev_priv(netdev);
6812 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6813 " value 0x%x\n", prtad, devad, addr, value);
6815 /* The HW expects different devad if CL22 is used */
6816 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6818 bnx2x_acquire_phy_lock(bp);
6819 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
6820 bnx2x_release_phy_lock(bp);
6824 /* called with rtnl_lock */
/* ndo_do_ioctl: forward MII ioctls to the generic mdio45 handler;
 * only valid while the interface is running.
 */
6825 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6827 struct bnx2x *bp = netdev_priv(dev);
6828 struct mii_ioctl_data *mdio = if_mii(ifr);
6830 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6831 mdio->phy_id, mdio->reg_num, mdio->val_in);
6833 if (!netif_running(dev))
6836 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
6839 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service the device by invoking the interrupt handler
 * directly with the IRQ line masked.
 */
6840 static void poll_bnx2x(struct net_device *dev)
6842 struct bnx2x *bp = netdev_priv(dev);
6844 disable_irq(bp->pdev->irq);
6845 bnx2x_interrupt(bp->pdev->irq, dev);
6846 enable_irq(bp->pdev->irq);
/* Netdevice operations table wiring the ndo_* callbacks above. */
6850 static const struct net_device_ops bnx2x_netdev_ops = {
6851 .ndo_open = bnx2x_open,
6852 .ndo_stop = bnx2x_close,
6853 .ndo_start_xmit = bnx2x_start_xmit,
6854 .ndo_set_multicast_list = bnx2x_set_rx_mode,
6855 .ndo_set_mac_address = bnx2x_change_mac_addr,
6856 .ndo_validate_addr = eth_validate_addr,
6857 .ndo_do_ioctl = bnx2x_ioctl,
6858 .ndo_change_mtu = bnx2x_change_mtu,
6859 .ndo_tx_timeout = bnx2x_tx_timeout,
6861 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
6863 #ifdef CONFIG_NET_POLL_CONTROLLER
6864 .ndo_poll_controller = poll_bnx2x,
/* PCI-level device bring-up: enable the device, claim BARs, locate
 * the PM and PCIe capabilities, configure the DMA mask, map the
 * register window (BAR0) and doorbell window (BAR2), clean the GRC
 * indirect-address registers, and initialize netdev ops, feature
 * flags and the mdio45 interface.  Errors unwind through the
 * err_out_* labels (partially elided in this view).
 */
6868 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6869 struct net_device *dev)
6874 SET_NETDEV_DEV(dev, &pdev->dev);
6875 bp = netdev_priv(dev);
6880 bp->func = PCI_FUNC(pdev->devfn);
6882 rc = pci_enable_device(pdev);
6884 dev_err(&bp->pdev->dev,
6885 "Cannot enable PCI device, aborting\n");
/* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs. */
6889 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6890 dev_err(&bp->pdev->dev,
6891 "Cannot find PCI device base address, aborting\n");
6893 goto err_out_disable;
6896 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
6897 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
6898 " base address, aborting\n");
6900 goto err_out_disable;
/* Only the first function to enable the device claims the regions. */
6903 if (atomic_read(&pdev->enable_cnt) == 1) {
6904 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6906 dev_err(&bp->pdev->dev,
6907 "Cannot obtain PCI resources, aborting\n");
6908 goto err_out_disable;
6911 pci_set_master(pdev);
6912 pci_save_state(pdev);
6915 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6916 if (bp->pm_cap == 0) {
6917 dev_err(&bp->pdev->dev,
6918 "Cannot find power management capability, aborting\n");
6920 goto err_out_release;
6923 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
6924 if (bp->pcie_cap == 0) {
6925 dev_err(&bp->pdev->dev,
6926 "Cannot find PCI Express capability, aborting\n");
6928 goto err_out_release;
/* Prefer 64-bit DMA; fall back to 32-bit, else abort. */
6931 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
6932 bp->flags |= USING_DAC_FLAG;
6933 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
6934 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
6935 " failed, aborting\n");
6937 goto err_out_release;
6940 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6941 dev_err(&bp->pdev->dev,
6942 "System does not support DMA, aborting\n");
6944 goto err_out_release;
6947 dev->mem_start = pci_resource_start(pdev, 0);
6948 dev->base_addr = dev->mem_start;
6949 dev->mem_end = pci_resource_end(pdev, 0);
6951 dev->irq = pdev->irq;
6953 bp->regview = pci_ioremap_bar(pdev, 0);
6955 dev_err(&bp->pdev->dev,
6956 "Cannot map register space, aborting\n");
6958 goto err_out_release;
/* Doorbell window: map at most BNX2X_DB_SIZE of BAR2. */
6961 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
6962 min_t(u64, BNX2X_DB_SIZE,
6963 pci_resource_len(pdev, 2)));
6964 if (!bp->doorbells) {
6965 dev_err(&bp->pdev->dev,
6966 "Cannot map doorbell space, aborting\n");
6971 bnx2x_set_power_state(bp, PCI_D0);
6973 /* clean indirect addresses */
6974 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
6975 PCICFG_VENDOR_ID_OFFSET);
6976 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
6977 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
6978 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
6979 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
6981 /* Reset the load counter */
6982 bnx2x_clear_load_cnt(bp);
6984 dev->watchdog_timeo = TX_TIMEOUT;
6986 dev->netdev_ops = &bnx2x_netdev_ops;
6987 bnx2x_set_ethtool_ops(dev);
6988 dev->features |= NETIF_F_SG;
6989 dev->features |= NETIF_F_HW_CSUM;
6990 if (bp->flags & USING_DAC_FLAG)
6991 dev->features |= NETIF_F_HIGHDMA;
6992 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6993 dev->features |= NETIF_F_TSO6;
6995 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
6996 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
/* Mirror the offload capabilities for VLAN devices. */
6998 dev->vlan_features |= NETIF_F_SG;
6999 dev->vlan_features |= NETIF_F_HW_CSUM;
7000 if (bp->flags & USING_DAC_FLAG)
7001 dev->vlan_features |= NETIF_F_HIGHDMA;
7002 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7003 dev->vlan_features |= NETIF_F_TSO6;
7006 /* get_port_hwinfo() will set prtad and mmds properly */
7007 bp->mdio.prtad = MDIO_PRTAD_NONE;
7009 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7011 bp->mdio.mdio_read = bnx2x_mdio_read;
7012 bp->mdio.mdio_write = bnx2x_mdio_write;
/* --- error unwind --- */
7018 iounmap(bp->regview);
7021 if (bp->doorbells) {
7022 iounmap(bp->doorbells);
7023 bp->doorbells = NULL;
7027 if (atomic_read(&pdev->enable_cnt) == 1)
7028 pci_release_regions(pdev);
7031 pci_disable_device(pdev);
7032 pci_set_drvdata(pdev, NULL);
/* Report the negotiated PCIe link width and speed read from the
 * chip's PCICFG link-control mirror.
 */
7038 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7039 int *width, int *speed)
7041 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7043 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7045 /* return value of 1=2.5GHz 2=5GHz */
7046 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
/* Validate a loaded firmware image: every section's (offset, len)
 * must lie within the file, every init_ops offset must index into the
 * ops array, and the embedded version must match the version this
 * driver was built against.
 */
7049 static int bnx2x_check_firmware(struct bnx2x *bp)
7051 const struct firmware *firmware = bp->firmware;
7052 struct bnx2x_fw_file_hdr *fw_hdr;
7053 struct bnx2x_fw_file_section *sections;
7054 u32 offset, len, num_ops;
7059 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7062 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
/* The header is itself an array of section descriptors. */
7063 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7065 /* Make sure none of the offsets and sizes make us read beyond
7066 * the end of the firmware data */
7067 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7068 offset = be32_to_cpu(sections[i].offset);
7069 len = be32_to_cpu(sections[i].len);
/* NOTE(review): offset + len is a u32 sum and could wrap for a
 * malformed image — confirm whether an overflow check is needed. */
7070 if (offset + len > firmware->size) {
7071 dev_err(&bp->pdev->dev,
7072 "Section %d length is out of bounds\n", i);
7077 /* Likewise for the init_ops offsets */
7078 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7079 ops_offsets = (u16 *)(firmware->data + offset);
7080 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7082 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7083 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7084 dev_err(&bp->pdev->dev,
7085 "Section offset %d is out of bounds\n", i);
7090 /* Check FW version */
7091 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7092 fw_ver = firmware->data + offset;
7093 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7094 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7095 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7096 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7097 dev_err(&bp->pdev->dev,
7098 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7099 fw_ver[0], fw_ver[1], fw_ver[2],
7100 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7101 BCM_5710_FW_MINOR_VERSION,
7102 BCM_5710_FW_REVISION_VERSION,
7103 BCM_5710_FW_ENGINEERING_VERSION);
/* Convert n bytes of big-endian 32-bit words into host order,
 * copying from _source to _target (n is truncated to whole words).
 */
7110 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7112 const __be32 *source = (const __be32 *)_source;
7113 u32 *target = (u32 *)_target;
7116 for (i = 0; i < n/4; i++)
7117 target[i] = be32_to_cpu(source[i]);
7121 Ops array is stored in the following format:
7122 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
/* Unpack the firmware init-ops stream (8 bytes per entry, as
 * described above) into an array of struct raw_op in host order.
 */
7124 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7126 const __be32 *source = (const __be32 *)_source;
7127 struct raw_op *target = (struct raw_op *)_target;
/* j walks source 32-bit words two at a time; i indexes entries. */
7130 for (i = 0, j = 0; i < n/8; i++, j += 2) {
7131 tmp = be32_to_cpu(source[j]);
/* First word packs op (top byte) and a 24-bit offset. */
7132 target[i].op = (tmp >> 24) & 0xff;
7133 target[i].offset = tmp & 0xffffff;
7134 target[i].raw_data = be32_to_cpu(source[j + 1]);
/* Convert n bytes of big-endian 16-bit values into host order,
 * copying from _source to _target (n is truncated to whole halfwords).
 */
7138 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7140 const __be16 *source = (const __be16 *)_source;
7141 u16 *target = (u16 *)_target;
7144 for (i = 0; i < n/2; i++)
7145 target[i] = be16_to_cpu(source[i]);
/* Allocate bp->arr sized from the firmware header, then fill it by
 * running 'func' over the corresponding section of the firmware
 * image; jump to label 'lbl' on allocation failure.  (Comments must
 * stay outside the macro body because of the line continuations.)
 */
7148 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7150 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7151 bp->arr = kmalloc(len, GFP_KERNEL); \
7153 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7156 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7157 (u8 *)bp->arr, len); \
/* Request the chip-specific firmware file, validate it, unpack the
 * init data / init ops / init offsets arrays (endian-converted), and
 * set up pointers to the per-STORM interrupt tables and PRAM images.
 * On failure the already-allocated arrays are freed in reverse order
 * via the *_alloc_err labels.
 */
7160 int bnx2x_init_firmware(struct bnx2x *bp)
7162 const char *fw_file_name;
7163 struct bnx2x_fw_file_hdr *fw_hdr;
/* Pick the firmware image matching the chip revision. */
7167 fw_file_name = FW_FILE_NAME_E1;
7168 else if (CHIP_IS_E1H(bp))
7169 fw_file_name = FW_FILE_NAME_E1H;
7171 BNX2X_ERR("Unsupported chip revision\n");
7175 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7177 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7179 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7180 goto request_firmware_exit;
7183 rc = bnx2x_check_firmware(bp);
7185 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7186 goto request_firmware_exit;
7189 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7191 /* Initialize the pointers to the init arrays */
7193 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7196 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7199 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7202 /* STORMs firmware */
/* Interrupt tables and PRAM are used in place from the fw image. */
7203 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7204 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7205 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7206 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7207 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7208 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7209 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7210 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7211 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7212 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7213 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7214 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7215 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7216 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7217 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7218 be32_to_cpu(fw_hdr->csem_pram_data.offset);
/* --- error unwind: free in reverse allocation order --- */
7222 init_offsets_alloc_err:
7223 kfree(bp->init_ops);
7225 kfree(bp->init_data);
7226 request_firmware_exit:
7227 release_firmware(bp->firmware);
/* PCI probe entry point: allocate the multi-queue ethernet device,
 * run PCI/device init and software init, register the netdev, and
 * print the board summary line.  Errors unwind through the labels at
 * the end (partially elided in this view).
 */
7233 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7234 const struct pci_device_id *ent)
7236 struct net_device *dev = NULL;
7238 int pcie_width, pcie_speed;
7241 /* dev zeroed in init_etherdev */
7242 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7244 dev_err(&pdev->dev, "Cannot allocate net device\n");
7248 bp = netdev_priv(dev);
7249 bp->msg_enable = debug;
7251 pci_set_drvdata(pdev, dev);
7253 rc = bnx2x_init_dev(pdev, dev);
7259 rc = bnx2x_init_bp(bp);
7263 rc = register_netdev(dev);
7265 dev_err(&pdev->dev, "Cannot register net device\n");
7269 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7270 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7271 " IRQ %d, ", board_info[ent->driver_data].name,
7272 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7273 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7274 dev->base_addr, bp->pdev->irq);
7275 pr_cont("node addr %pM\n", dev->dev_addr);
/* --- error unwind --- */
7281 iounmap(bp->regview);
7284 iounmap(bp->doorbells);
7288 if (atomic_read(&pdev->enable_cnt) == 1)
7289 pci_release_regions(pdev);
7291 pci_disable_device(pdev);
7292 pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: unregister the netdev, cancel pending
 * reset work, unmap BARs, and release/disable the PCI device.
 */
7297 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7299 struct net_device *dev = pci_get_drvdata(pdev);
7303 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7306 bp = netdev_priv(dev);
7308 unregister_netdev(dev);
7310 /* Make sure RESET task is not scheduled before continuing */
7311 cancel_delayed_work_sync(&bp->reset_task);
7314 iounmap(bp->regview);
7317 iounmap(bp->doorbells);
/* Regions are released only by the last function using the device. */
7321 if (atomic_read(&pdev->enable_cnt) == 1)
7322 pci_release_regions(pdev);
7324 pci_disable_device(pdev);
7325 pci_set_drvdata(pdev, NULL);
/* Minimal NIC teardown used during PCI error (EEH) handling: stop
 * the data path, timer and IRQs, invalidate E1 multicast CAM
 * entries, and free SKBs/SGEs/NAPI contexts — without touching the
 * (possibly dead) hardware beyond that.
 */
7328 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7332 bp->state = BNX2X_STATE_ERROR;
7334 bp->rx_mode = BNX2X_RX_MODE_NONE;
7336 bnx2x_netif_stop(bp, 0);
7337 netif_carrier_off(bp->dev);
7339 del_timer_sync(&bp->timer);
7340 bp->stats_state = STATS_STATE_DISABLED;
7341 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7344 bnx2x_free_irq(bp, false);
7346 if (CHIP_IS_E1(bp)) {
7347 struct mac_configuration_cmd *config =
7348 bnx2x_sp(bp, mcast_config);
7350 for (i = 0; i < config->hdr.length; i++)
7351 CAM_INVALIDATE(config->config_table[i]);
7354 /* Free SKBs, SGEs, TPA pool and driver internals */
7355 bnx2x_free_skbs(bp);
7356 for_each_queue(bp, i)
7357 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7358 for_each_queue(bp, i)
7359 netif_napi_del(&bnx2x_fp(bp, i, napi));
7362 bp->state = BNX2X_STATE_CLOSED;
/* Re-discover the shared-memory base after a PCI error recovery and
 * re-validate the MCP: if shmem is out of the expected window the MCP
 * is considered inactive (NO_MCP_FLAG); otherwise re-seed the
 * firmware mailbox sequence.
 */
7367 static void bnx2x_eeh_recover(struct bnx2x *bp)
7371 mutex_init(&bp->port.phy_mutex);
7373 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7374 bp->link_params.shmem_base = bp->common.shmem_base;
7375 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
/* Valid shmem lives in [0xA0000, 0xC0000); anything else => no MCP. */
7377 if (!bp->common.shmem_base ||
7378 (bp->common.shmem_base < 0xA0000) ||
7379 (bp->common.shmem_base >= 0xC0000)) {
7380 BNX2X_DEV_INFO("MCP not active\n");
7381 bp->flags |= NO_MCP_FLAG;
7385 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7386 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7387 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7388 BNX2X_ERR("BAD MCP validity signature\n");
7390 if (!BP_NOMCP(bp)) {
7391 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7392 & DRV_MSG_SEQ_NUMBER_MASK);
7393 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7398 * bnx2x_io_error_detected - called when PCI error is detected
7399 * @pdev: Pointer to PCI device
7400 * @state: The current pci connection state
7402 * This function is called after a PCI bus error affecting
7403 * this device has been detected.
/* Detach the netdev, unload if running, disable the device, and ask
 * the PCI core for a slot reset (or disconnect on permanent failure).
 */
7405 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7406 pci_channel_state_t state)
7408 struct net_device *dev = pci_get_drvdata(pdev);
7409 struct bnx2x *bp = netdev_priv(dev);
7413 netif_device_detach(dev);
7415 if (state == pci_channel_io_perm_failure) {
7417 return PCI_ERS_RESULT_DISCONNECT;
7420 if (netif_running(dev))
7421 bnx2x_eeh_nic_unload(bp);
7423 pci_disable_device(pdev);
7427 /* Request a slot reset */
7428 return PCI_ERS_RESULT_NEED_RESET;
7432 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7433 * @pdev: Pointer to PCI device
7435 * Restart the card from scratch, as if from a cold-boot.
7437 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7439 struct net_device *dev = pci_get_drvdata(pdev);
7440 struct bnx2x *bp = netdev_priv(dev);
/* Re-enable the device; disconnect if that fails. */
7444 if (pci_enable_device(pdev)) {
7446 "Cannot re-enable PCI device after reset\n");
7448 return PCI_ERS_RESULT_DISCONNECT;
7451 pci_set_master(pdev);
7452 pci_restore_state(pdev);
7454 if (netif_running(dev))
7455 bnx2x_set_power_state(bp, PCI_D0);
7459 return PCI_ERS_RESULT_RECOVERED;
7463 * bnx2x_io_resume - called when traffic can start flowing again
7464 * @pdev: Pointer to PCI device
7466 * This callback is called when the error recovery driver tells us that
7467 * its OK to resume normal operation.
7469 static void bnx2x_io_resume(struct pci_dev *pdev)
7471 struct net_device *dev = pci_get_drvdata(pdev);
7472 struct bnx2x *bp = netdev_priv(dev);
/* Do not resume while a parity-error recovery is still in progress. */
7474 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7475 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7481 bnx2x_eeh_recover(bp);
7483 if (netif_running(dev))
7484 bnx2x_nic_load(bp, LOAD_NORMAL);
7486 netif_device_attach(dev);
/* PCI AER/EEH error-recovery callbacks. */
7491 static struct pci_error_handlers bnx2x_err_handler = {
7492 .error_detected = bnx2x_io_error_detected,
7493 .slot_reset = bnx2x_io_slot_reset,
7494 .resume = bnx2x_io_resume,
/* PCI driver registration structure. */
7497 static struct pci_driver bnx2x_pci_driver = {
7498 .name = DRV_MODULE_NAME,
7499 .id_table = bnx2x_pci_tbl,
7500 .probe = bnx2x_init_one,
7501 .remove = __devexit_p(bnx2x_remove_one),
7502 .suspend = bnx2x_suspend,
7503 .resume = bnx2x_resume,
7504 .err_handler = &bnx2x_err_handler,
/*
 * Module entry point: create the driver's single-threaded workqueue
 * and register with the PCI core.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be created,
 * or the pci_register_driver() error code.
 */
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	/* slowpath work (resets, link handling) runs on this workqueue */
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		/* undo the workqueue creation on registration failure */
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}
/*
 * Module exit point: unregister from the PCI core (which tears down all
 * bound devices) and then destroy the slowpath workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
/* register the module's load/unload entry points with the kernel */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
/*
 * Move queued CNIC kernel work-queue entries (KWQEs) onto the hardware
 * slow-path queue (SPQ), as SPQ slots become available.
 *
 * count denotes the number of new completions we have seen; it is
 * subtracted from cnic_spq_pending to free up that many SPQ credits.
 * Entries are copied from the software ring (cnic_kwq, consumed at
 * cnic_kwq_cons) until either the SPQ credit limit
 * (cnic_eth_dev.max_kwqe_pending) is reached or the software ring is
 * empty.  Runs under bp->spq_lock.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		/* software ring drained - nothing more to post */
		if (!bp->cnic_kwq_pending)
			break;

		/* copy the next queued KWQE into a fresh SPQ element */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* make the new SPQ producer value visible to the chip */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
/*
 * CNIC callback (cnic_eth_dev.drv_submit_kwqes_16): queue up to @count
 * 16-byte KWQEs on the software ring, then kick bnx2x_cnic_sp_post() to
 * push as many as SPQ credits allow.
 *
 * Returns the number of KWQEs actually accepted; entries beyond a full
 * ring (MAX_SP_DESC_CNT pending) are dropped and the caller is expected
 * to resubmit them.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		/* KWQE_16 entries share the eth_spe layout on the wire */
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* software ring full - stop accepting */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* if there is SPQ headroom, post the new entries right away */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
/*
 * Deliver a control event to the registered CNIC driver, if any.
 * Sleeps on bp->cnic_mutex, so it must be called from process context.
 * Returns the CNIC handler's result, or 0 when no CNIC is registered.
 */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
/*
 * Bottom-half-safe variant of bnx2x_cnic_ctl_send(): uses RCU instead of
 * the mutex so it may be called from softirq context.  Returns the CNIC
 * handler's result, or 0 when no CNIC is registered.
 */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
/*
 * for commands that have no data
 *
 * Send a data-less notification (@cmd) to the CNIC driver; wraps
 * bnx2x_cnic_ctl_send() with a zeroed cnic_ctl_info.
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
/*
 * Forward a CFC-delete completion for connection @cid to the CNIC
 * driver, then release one SPQ credit via bnx2x_cnic_sp_post().
 */
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
/*
 * CNIC -> driver control entry point (cnic_eth_dev.drv_ctl): dispatch
 * on ctl->cmd for context-table writes, SPQ completion accounting, and
 * enabling/disabling rx-mode for a CNIC L2 client.
 * Returns 0 on success, -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* write one ILT context-table entry on CNIC's behalf */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC reports comp_count completed slow-path entries */
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* include this client in the storm rx-mode mask */
		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* remove this client from the storm rx-mode mask */
		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
/*
 * Fill in the IRQ/status-block information that CNIC uses:
 * irq_arr[0] is the CNIC status block (second MSI-X vector when MSI-X
 * is in use), irq_arr[1] is the default status block.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* CNIC rides on the vector after the default sb's */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
/*
 * CNIC callback (cnic_eth_dev.drv_register_cnic): attach a CNIC driver.
 * Allocates the software KWQE ring, initializes the CNIC status block,
 * programs the iSCSI MAC and finally publishes @ops via RCU.
 *
 * Returns 0 on success, -EINVAL for a NULL @ops, -EBUSY while interrupts
 * are still disabled, or -ENOMEM if the ring allocation fails.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* intr_sem != 0 means interrupt handling is still disabled */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* one page holds the whole software KWQE ring */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish last so readers only see a fully initialized state */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
/*
 * CNIC callback (cnic_eth_dev.drv_unregister_cnic): detach the CNIC
 * driver.  Clears the iSCSI MAC, unpublishes cnic_ops via RCU, waits for
 * in-flight RCU readers, and frees the software KWQE ring.
 * Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait out bnx2x_cnic_ctl_send_bh() readers before freeing */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
/*
 * Entry point for the CNIC module: describe this device's resources
 * (BARs, context-table layout, CID range) and the driver callbacks in
 * the embedded cnic_eth_dev, and hand it back to CNIC.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	/* cap on KWQEs posted to the SPQ at once */
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
7819 #endif /* BCM_CNIC */