1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_cmn.h"
61 #include <linux/firmware.h>
62 #include "bnx2x_fw_file_hdr.h"
/* Firmware version string "maj.min.rev.eng", built from the per-chip
 * version macros; used to form the firmware request names below.
 */
64 #define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
/* Firmware blob names for E1 and E1H chip variants */
69 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT (5*HZ)
/* Banner printed once at module load time */
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
/* Module metadata and the firmware files this driver may request */
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
/* Module parameters: debug/tuning knobs, all with mode 0 (not
 * writable via sysfs after load).
 */
86 static int multi_mode = 1;
87 module_param(multi_mode, int, 0);
88 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
91 static int num_queues;
92 module_param(num_queues, int, 0);
93 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
/* NOTE(review): the declarations of int_mode, poll and debug are not
 * visible in this extract — presumably plain "static int" variables;
 * confirm against the full file.
 */
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
105 static int dropless_fc;
106 module_param(dropless_fc, int, 0);
107 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110 module_param(poll, int, 0);
111 MODULE_PARM_DESC(poll, " Use polling (for debug)");
113 static int mrrs = -1;
114 module_param(mrrs, int, 0);
115 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 module_param(debug, int, 0);
119 MODULE_PARM_DESC(debug, " Default debug msglevel");
/* Single driver-wide workqueue for slowpath work items */
121 static struct workqueue_struct *bnx2x_wq;
/* Supported board variants; the enum values index board_info[] and are
 * stored as driver_data in the PCI device table below.
 * NOTE(review): the enum members (BCM57710 etc.) are not visible in
 * this extract.
 */
123 enum bnx2x_board_type {
129 /* indexed by board_type, above */
132 } board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" }
/* PCI IDs this driver binds to */
139 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
153 * locking is done by mcp
/* Indirect register write through the PCI config-space GRC window:
 * program the target address, write the data, then park the window
 * back at the (harmless) vendor-ID offset.
 */
155 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
/* Indirect register read, mirror of bnx2x_reg_wr_ind() above.
 * NOTE(review): the local "val" declaration and return statement fall
 * in lines missing from this extract.
 */
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
/* "GO" doorbell registers for the 16 DMAE command channels, indexed by
 * channel number (see bnx2x_post_dmae()).
 */
175 const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
182 /* copy command into DMAE command memory and set DMAE command go */
/* @idx selects the DMAE channel: the command is written word-by-word
 * into that channel's slot in DMAE_REG_CMD_MEM, then the matching
 * dmae_reg_go_c[] doorbell is rung to start the transfer.
 */
183 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
188 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
189 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
/* kick off the posted command */
195 REG_WR(bp, dmae_reg_go_c[idx], 1);
/* DMA len32 dwords from host memory (dma_addr) to GRC space (dst_addr)
 * using the DMAE engine, then spin on the slowpath completion word
 * until the engine signals DMAE_COMP_VAL.  Falls back to indirect
 * writes when DMAE is not yet initialized.  Serialized by dmae_mutex.
 */
198 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201 struct dmae_command dmae;
202 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* early boot / recovery path: DMAE unavailable, write indirectly */
205 if (!bp->dmae_ready) {
206 u32 *data = bnx2x_sp(bp, wb_data[0]);
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
209 " using indirect\n", dst_addr, len32);
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
214 memset(&dmae, 0, sizeof(struct dmae_command));
/* PCI -> GRC transfer; endianity flags are #ifdef-selected (the
 * #ifdef lines fall outside this extract)
 */
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
220 DMAE_CMD_ENDIANITY_B_DW_SWAP |
222 DMAE_CMD_ENDIANITY_DW_SWAP |
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226 dmae.src_addr_lo = U64_LO(dma_addr);
227 dmae.src_addr_hi = U64_HI(dma_addr);
228 dmae.dst_addr_lo = dst_addr >> 2; /* GRC addresses are dword-based */
229 dmae.dst_addr_hi = 0;
/* completion is written back to the slowpath wb_comp dword */
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233 dmae.comp_val = DMAE_COMP_VAL;
235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
237 "dst_addr [%x:%08x (%08x)]\n"
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
246 mutex_lock(&bp->dmae_mutex);
250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
/* busy-wait for completion; the timeout countdown / udelay lines
 * fall outside this extract
 */
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258 BNX2X_ERR("DMAE timeout!\n");
262 /* adjust delay for emulation/FPGA */
263 if (CHIP_REV_IS_SLOW(bp))
269 mutex_unlock(&bp->dmae_mutex);
/* DMA len32 dwords from GRC space (src_addr) into the slowpath wb_data
 * buffer using the DMAE engine — the read-direction mirror of
 * bnx2x_write_dmae().  Falls back to indirect register reads when DMAE
 * is not ready.  Serialized by dmae_mutex.
 */
272 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
274 struct dmae_command dmae;
275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
278 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]);
282 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
283 " using indirect\n", src_addr, len32);
284 for (i = 0; i < len32; i++)
285 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
289 memset(&dmae, 0, sizeof(struct dmae_command));
/* GRC -> PCI transfer; endianity flags #ifdef-selected as above */
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
297 DMAE_CMD_ENDIANITY_DW_SWAP |
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301 dmae.src_addr_lo = src_addr >> 2; /* GRC addresses are dword-based */
302 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
318 mutex_lock(&bp->dmae_mutex);
/* clear the destination buffer so stale data is never mistaken
 * for the DMA result
 */
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
327 while (*wb_comp != DMAE_COMP_VAL) {
330 BNX2X_ERR("DMAE timeout!\n");
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
344 mutex_unlock(&bp->dmae_mutex);
/* Write an arbitrarily long buffer via DMAE by splitting it into
 * chip-limited chunks of at most DMAE_LEN32_WR_MAX dwords each; the
 * remainder goes out in a final bnx2x_write_dmae() call.
 */
347 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
353 while (len > dmae_wr_max) {
354 bnx2x_write_dmae(bp, phys_addr + offset,
355 addr + offset, dmae_wr_max);
356 offset += dmae_wr_max * 4; /* offset is in bytes, len in dwords */
360 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
363 /* used only for slowpath so not inlined */
/* Write a 64-bit value to a wide-bus register pair as two dwords
 * (hi first) via DMAE.
 */
364 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
368 wb_write[0] = val_hi;
369 wb_write[1] = val_lo;
370 REG_WR_DMAE(bp, reg, wb_write, 2);
/* Read a 64-bit wide-bus register pair via DMAE and recombine the two
 * dwords (counterpart of bnx2x_wb_wr() above).
 */
374 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
378 REG_RD_DMAE(bp, reg, wb_data, 2);
380 return HILO_U64(wb_data[0], wb_data[1]);
/* Scan the assert lists of the four storm processors (X/T/C/U) in
 * internal memory and log every valid entry (four dwords each, printed
 * high-to-low).  Entries whose first dword equals
 * COMMON_ASM_INVALID_ASSERT_OPCODE are empty.
 * NOTE(review): the return value / rc accounting lines fall outside
 * this extract.
 */
384 static int bnx2x_mc_assert(struct bnx2x *bp)
388 u32 row0, row1, row2, row3;
/* XSTORM */
391 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392 XSTORM_ASSERT_LIST_INDEX_OFFSET);
394 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396 /* print the asserts */
397 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400 XSTORM_ASSERT_LIST_OFFSET(i));
401 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410 " 0x%08x 0x%08x 0x%08x\n",
411 i, row3, row2, row1, row0);
/* TSTORM */
419 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420 TSTORM_ASSERT_LIST_INDEX_OFFSET);
422 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424 /* print the asserts */
425 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428 TSTORM_ASSERT_LIST_OFFSET(i));
429 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438 " 0x%08x 0x%08x 0x%08x\n",
439 i, row3, row2, row1, row0);
/* CSTORM */
447 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448 CSTORM_ASSERT_LIST_INDEX_OFFSET);
450 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452 /* print the asserts */
453 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456 CSTORM_ASSERT_LIST_OFFSET(i));
457 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466 " 0x%08x 0x%08x 0x%08x\n",
467 i, row3, row2, row1, row0);
/* USTORM */
475 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476 USTORM_ASSERT_LIST_INDEX_OFFSET);
478 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480 /* print the asserts */
481 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484 USTORM_ASSERT_LIST_OFFSET(i));
485 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486 USTORM_ASSERT_LIST_OFFSET(i) + 4);
487 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i) + 8);
489 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 12);
492 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494 " 0x%08x 0x%08x 0x%08x\n",
495 i, row3, row2, row1, row0);
/* Dump the MCP firmware's circular trace buffer from scratchpad memory
 * to the kernel log.  "mark" (read from just below shmem_base) is the
 * current write position; the buffer is printed in two passes — from
 * mark to the end, then from the start up to mark — 8 words at a time,
 * byte-swapped so the raw bytes form printable text.
 */
505 static void bnx2x_fw_dump(struct bnx2x *bp)
513 BNX2X_ERR("NO MCP - can not dump\n");
517 addr = bp->common.shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr);
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520 pr_err("begin fw dump (mark 0x%x)\n", mark);
523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
524 for (word = 0; word < 8; word++)
/* htonl puts the trace bytes in printable order */
525 data[word] = htonl(REG_RD(bp, offset + 4*word));
527 pr_cont("%s", (char *)data);
529 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 pr_err("end of fw dump\n");
/* Emergency state dump for debugging a wedged device: disables
 * statistics, then logs the slowpath indices, per-queue Rx/Tx ring
 * indices, and a window of ring entries around each consumer.
 */
538 void bnx2x_panic_dump(struct bnx2x *bp)
/* freeze statistics so the dump is self-consistent */
543 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
546 BNX2X_ERR("begin crash dump -----------------\n");
/* slowpath/default status block indices */
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
/* per-queue Rx indices */
557 for_each_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i];
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx),
570 fp->status_blk->u_status_block.status_block_index);
/* per-queue Tx indices */
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
584 fp->status_blk->c_status_block.status_block_index,
585 fp->tx_db.data.prod);
/* Rx ring contents: a window around each consumer index */
590 for_each_queue(bp, i) {
591 struct bnx2x_fastpath *fp = &bp->fp[i];
593 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
595 for (j = start; j != end; j = RX_BD(j + 1)) {
596 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
599 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
600 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
603 start = RX_SGE(fp->rx_sge_prod);
604 end = RX_SGE(fp->last_max_sge);
605 for (j = start; j != end; j = RX_SGE(j + 1)) {
606 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
609 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
610 i, j, rx_sge[1], rx_sge[0], sw_page->page);
613 start = RCQ_BD(fp->rx_comp_cons - 10);
614 end = RCQ_BD(fp->rx_comp_cons + 503);
615 for (j = start; j != end; j = RCQ_BD(j + 1)) {
616 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
618 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
/* Tx ring contents: a window around each consumer index */
624 for_each_queue(bp, i) {
625 struct bnx2x_fastpath *fp = &bp->fp[i];
627 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629 for (j = start; j != end; j = TX_BD(j + 1)) {
630 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
632 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633 i, j, sw_bd->skb, sw_bd->first_bd);
636 start = TX_BD(fp->tx_bd_cons - 10);
637 end = TX_BD(fp->tx_bd_cons + 254);
638 for (j = start; j != end; j = TX_BD(j + 1)) {
639 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
641 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
648 BNX2X_ERR("end crash dump -----------------\n");
/* Enable interrupts in the HC (host coalescing) block for this port,
 * programming HC_REG_CONFIG_x according to the active interrupt mode
 * (MSI-X / MSI / INTx), then setting up attention leading/trailing
 * edge registers on E1H.
 * NOTE(review): the if/else branch lines selecting between the msix /
 * msi / INTx cases fall outside this extract.
 */
651 void bnx2x_int_enable(struct bnx2x *bp)
653 int port = BP_PORT(bp);
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655 u32 val = REG_RD(bp, addr);
656 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
/* MSI-X: single-ISR off, int-line off, MSI/MSI-X + attention on */
660 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661 HC_CONFIG_0_REG_INT_LINE_EN_0);
662 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* MSI: int-line off, single-ISR + MSI/MSI-X + attention on */
665 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* INTx: everything on */
670 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
678 REG_WR(bp, addr, val);
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
686 REG_WR(bp, addr, val);
688 * Ensure that HC_CONFIG is written before leading/trailing edge config
693 if (CHIP_IS_E1H(bp)) {
694 /* init leading/trailing edge */
/* mask own-VN attention bit on a multi-VN (MF) setup */
696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
698 /* enable nig and gpio3 attention */
703 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
707 /* Make sure that interrupts are indeed enabled from here on */
/* Disable all interrupt sources in the HC block for this port and
 * read the register back to verify (and to flush posted writes).
 */
711 static void bnx2x_int_disable(struct bnx2x *bp)
713 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr);
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719 HC_CONFIG_0_REG_INT_LINE_EN_0 |
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
725 /* flush all outstanding writes */
728 REG_WR(bp, addr, val);
/* read-back verifies the write actually reached the IGU */
729 if (REG_RD(bp, addr) != val)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n");
/* Quiesce interrupt processing: raise intr_sem so handlers bail out,
 * optionally disable the HW source (@disable_hw), wait for in-flight
 * ISRs via synchronize_irq(), and flush the slowpath workqueue.
 */
733 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
738 /* disable interrupt handling */
739 atomic_inc(&bp->intr_sem);
740 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
743 /* prevent the HW from sending interrupts */
744 bnx2x_int_disable(bp);
746 /* make sure all ISRs are done */
/* MSI-X: sync the slowpath vector, then each queue vector */
748 synchronize_irq(bp->msix_table[0].vector);
753 for_each_queue(bp, i)
754 synchronize_irq(bp->msix_table[i + offset].vector);
/* MSI / INTx: a single shared line */
756 synchronize_irq(bp->pdev->irq);
758 /* make sure sp_task is not running */
759 cancel_delayed_work(&bp->sp_task);
760 flush_workqueue(bnx2x_wq);
766 * General service functions
769 /* Return true if succeeded to acquire the lock */
/* Non-blocking attempt to take a HW resource lock: write the resource
 * bit to the per-function control register and read back to see if the
 * hardware granted it.  Returns true on success (the return statements
 * fall in lines outside this extract).
 */
770 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
773 u32 resource_bit = (1 << resource);
774 int func = BP_FUNC(bp);
775 u32 hw_lock_control_reg;
777 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
779 /* Validating that the resource is within range */
780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
/* functions 0-5 use CONTROL_1, 6-7 use CONTROL_7 */
788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
790 hw_lock_control_reg =
791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
793 /* Try to acquire the lock */
794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795 lock_status = REG_RD(bp, hw_lock_control_reg);
796 if (lock_status & resource_bit)
799 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
805 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
/* Slowpath (ramrod) completion handler, called from the fastpath CQE
 * processing.  Dispatches on (command | state): per-queue setup/halt
 * completions update fp->state; global setup/halt/CFC-delete/set-MAC
 * completions update bp->state and related bookkeeping.
 */
808 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
809 union eth_rx_cqe *rr_cqe)
811 struct bnx2x *bp = fp->bp;
812 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
813 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
816 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
817 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type);
/* per-queue ramrods: advance this fastpath's state machine */
823 switch (command | fp->state) {
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
828 fp->state = BNX2X_FP_STATE_OPEN;
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
834 fp->state = BNX2X_FP_STATE_HALTED;
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* global ramrods: advance the device-level state machine */
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED;
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
/* CFC delete while OPEN: completion for a cnic connection */
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 bp->set_mac_pending--;
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
889 mb(); /* force bnx2x_wait_ramrod() to see the change */
/* INTx/MSI interrupt handler: ack the IGU to get the status bitmap,
 * schedule NAPI for each fastpath queue whose bit is set, forward the
 * cnic status-block bit to the registered cnic handler, and kick the
 * slowpath work item for bit 0 (default SB / attentions).
 */
892 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
894 struct bnx2x *bp = netdev_priv(dev_instance);
895 u16 status = bnx2x_ack_int(bp);
899 /* Return here if interrupt is shared and it's not for us */
900 if (unlikely(status == 0)) {
901 DP(NETIF_MSG_INTR, "not our interrupt!\n");
904 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
906 /* Return here if interrupt is disabled */
907 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
908 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
912 #ifdef BNX2X_STOP_ON_ERROR
913 if (unlikely(bp->panic))
/* fastpath queues: one status bit per SB id */
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918 struct bnx2x_fastpath *fp = &bp->fp[i];
920 mask = 0x2 << fp->sb_id;
922 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
926 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block.
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
/* cnic (offload) status block */
935 mask = 0x2 << CNIC_SB_ID(bp);
936 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL;
940 c_ops = rcu_dereference(bp->cnic_ops);
942 c_ops->cnic_handler(bp->cnic_data, NULL);
/* bit 0 = default status block -> run the slowpath task */
949 if (unlikely(status & 0x1)) {
950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
957 if (unlikely(status))
958 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
970 * General service functions
/* Blocking acquire of a HW resource lock: validates the resource,
 * checks it is not already held, then retries the write/read-back
 * handshake every 5ms for up to 5 seconds.  Error-return statements
 * fall in lines outside this extract.
 */
973 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
976 u32 resource_bit = (1 << resource);
977 int func = BP_FUNC(bp);
978 u32 hw_lock_control_reg;
981 /* Validating that the resource is within range */
982 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
984 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985 resource, HW_LOCK_MAX_RESOURCE_VALUE);
/* functions 0-5 use CONTROL_1, 6-7 use CONTROL_7 */
990 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
992 hw_lock_control_reg =
993 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
996 /* Validating that the resource is not already taken */
997 lock_status = REG_RD(bp, hw_lock_control_reg);
998 if (lock_status & resource_bit) {
999 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1000 lock_status, resource_bit);
1004 /* Try for 5 second every 5ms */
1005 for (cnt = 0; cnt < 1000; cnt++) {
1006 /* Try to acquire the lock */
1007 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008 lock_status = REG_RD(bp, hw_lock_control_reg);
1009 if (lock_status & resource_bit)
1014 DP(NETIF_MSG_HW, "Timeout\n");
/* Release a HW resource lock previously taken with
 * bnx2x_acquire_hw_lock()/bnx2x_trylock_hw_lock(): validates the
 * resource and that it is actually held, then writes the bit to the
 * release offset of the control register.
 */
1018 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1021 u32 resource_bit = (1 << resource);
1022 int func = BP_FUNC(bp);
1023 u32 hw_lock_control_reg;
1025 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1027 /* Validating that the resource is within range */
1028 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1030 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031 resource, HW_LOCK_MAX_RESOURCE_VALUE);
/* functions 0-5 use CONTROL_1, 6-7 use CONTROL_7 */
1036 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1038 hw_lock_control_reg =
1039 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1042 /* Validating that the resource is currently taken */
1043 lock_status = REG_RD(bp, hw_lock_control_reg);
1044 if (!(lock_status & resource_bit)) {
1045 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1046 lock_status, resource_bit);
1050 REG_WR(bp, hw_lock_control_reg, resource_bit);
/* Read the current value of one GPIO pin, accounting for port swap:
 * the effective port (and hence the bit position within MISC_REG_GPIO)
 * is XOR'ed with the NIG swap straps.
 */
1055 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1057 /* The GPIO should be swapped if swap register is set and active */
1058 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060 int gpio_shift = gpio_num +
1061 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062 u32 gpio_mask = (1 << gpio_shift);
1066 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1071 /* read GPIO value */
1072 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1074 /* get the requested pin value */
1075 if ((gpio_reg & gpio_mask) == gpio_mask)
1080 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
/* Drive one GPIO pin low / high or float it (input), under the GPIO
 * HW lock.  Same port-swap handling as bnx2x_get_gpio().
 */
1085 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1087 /* The GPIO should be swapped if swap register is set and active */
1088 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1089 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1090 int gpio_shift = gpio_num +
1091 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092 u32 gpio_mask = (1 << gpio_shift);
1095 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1100 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1101 /* read GPIO and mask except the float bits */
1102 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1105 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107 gpio_num, gpio_shift);
1108 /* clear FLOAT and set CLR */
1109 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1113 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115 gpio_num, gpio_shift);
1116 /* clear FLOAT and set SET */
1117 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1121 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1122 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123 gpio_num, gpio_shift);
/* set FLOAT: pin becomes a high-impedance input */
1125 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1132 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1133 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* Set or clear the interrupt state of one GPIO pin (via
 * MISC_REG_GPIO_INT), under the GPIO HW lock.  Same port-swap handling
 * as bnx2x_get_gpio().
 */
1138 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1140 /* The GPIO should be swapped if swap register is set and active */
1141 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143 int gpio_shift = gpio_num +
1144 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145 u32 gpio_mask = (1 << gpio_shift);
1148 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1155 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160 "output low\n", gpio_num, gpio_shift);
1161 /* clear SET and set CLR */
1162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1166 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168 "output high\n", gpio_num, gpio_shift);
1169 /* clear CLR and set SET */
1170 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1178 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
/* Drive one SPIO pin (shared, not per-port — no swap handling) low /
 * high or float it, under the SPIO HW lock.  Only SPIO 4..7 are
 * settable.
 */
1184 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1186 u32 spio_mask = (1 << spio_num);
1189 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190 (spio_num > MISC_REGISTERS_SPIO_7)) {
1191 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1195 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1196 /* read SPIO and mask except the float bits */
1197 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1200 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1201 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202 /* clear FLOAT and set CLR */
1203 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1207 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1208 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209 /* clear FLOAT and set SET */
1210 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1214 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
/* set FLOAT: pin becomes a high-impedance input */
1217 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1224 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
/* Determine which external PHY's link configuration applies: when the
 * link is up, pick the PHY that carries it; otherwise fall back to the
 * configured PHY-selection priority.  The result is finally reversed
 * if PHY swapping is enabled, then mapped through LINK_CONFIG_IDX().
 */
1230 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1232 u32 sel_phy_idx = 0;
1233 if (bp->link_vars.link_up) {
1234 sel_phy_idx = EXT_PHY1;
1235 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1236 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1237 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1238 sel_phy_idx = EXT_PHY2;
/* link down: fall back to configured selection priority */
1241 switch (bnx2x_phy_selection(&bp->link_params)) {
1242 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1243 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1244 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1245 sel_phy_idx = EXT_PHY1;
1247 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1248 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1249 sel_phy_idx = EXT_PHY2;
1254 * The selected actived PHY is always after swapping (in case PHY
1255 * swapping is enabled). So when swapping is enabled, we need to reverse
1259 if (bp->link_params.multi_phy_config &
1260 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1261 if (sel_phy_idx == EXT_PHY1)
1262 sel_phy_idx = EXT_PHY2;
1263 else if (sel_phy_idx == EXT_PHY2)
1264 sel_phy_idx = EXT_PHY1;
1266 return LINK_CONFIG_IDX(sel_phy_idx);
/*
 * bnx2x_calc_fc_adv - translate the negotiated IEEE flow-control bits into
 * the ethtool ADVERTISED_{Pause,Asym_Pause} flags for the active link
 * configuration index.
 */
1269 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1271 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1272 switch (bp->link_vars.ieee_fc &
1273 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1274 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
/* no pause negotiated - clear both advertisement bits */
1275 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1279 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
/* symmetric + asymmetric pause advertised */
1280 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1284 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1285 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
/* default: clear pause advertisement */
1289 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
/*
 * bnx2x_initial_phy_init - bring up the link for the first time after load.
 *
 * Requires a working bootcode (MCP); with BP_NOMCP only an error is logged.
 * RX flow control is disabled for jumbo MTU (> 5000) per the vendor
 * recommendation below.  LOAD_DIAG forces XGXS loopback at 10G; the original
 * requested line speed is restored after bnx2x_phy_init() so diagnostics do
 * not leak into the saved configuration.
 *
 * Returns the status of bnx2x_phy_init() (rc declaration is elided in this
 * listing - NOTE(review): confirm against full source).
 */
1296 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1298 if (!BP_NOMCP(bp)) {
1300 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1301 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1302 /* Initialize link parameters structure variables */
1303 /* It is recommended to turn off RX FC for jumbo frames
1304 for better performance */
1305 if (bp->dev->mtu > 5000)
1306 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1308 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1310 bnx2x_acquire_phy_lock(bp);
1312 if (load_mode == LOAD_DIAG) {
1313 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1314 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1317 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1319 bnx2x_release_phy_lock(bp);
1321 bnx2x_calc_fc_adv(bp);
/* on emulation/FPGA the link may already be up here - kick statistics */
1323 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1324 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1325 bnx2x_link_report(bp);
/* restore the speed possibly overridden for LOAD_DIAG */
1327 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1330 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/*
 * bnx2x_link_set - re-apply the current link configuration by resetting the
 * PHY and re-initializing it under the PHY lock.  No-op (plus error log)
 * when the bootcode is absent.
 */
1334 void bnx2x_link_set(struct bnx2x *bp)
1336 if (!BP_NOMCP(bp)) {
1337 bnx2x_acquire_phy_lock(bp);
1338 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1339 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1340 bnx2x_release_phy_lock(bp);
1342 bnx2x_calc_fc_adv(bp);
1344 BNX2X_ERR("Bootcode is missing - can not set link\n");
/*
 * bnx2x__link_reset - bring the link down (PHY reset) under the PHY lock.
 * Requires bootcode; otherwise only logs an error.
 */
1347 static void bnx2x__link_reset(struct bnx2x *bp)
1349 if (!BP_NOMCP(bp)) {
1350 bnx2x_acquire_phy_lock(bp);
1351 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1352 bnx2x_release_phy_lock(bp);
1354 BNX2X_ERR("Bootcode is missing - can not reset link\n");
/*
 * bnx2x_link_test - run the self-test on the link (used by ethtool
 * diagnostics).  @is_serdes selects which PHY path to test; the result of
 * bnx2x_test_link() is returned (rc declaration/return are elided in this
 * listing).  Requires bootcode.
 */
1357 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1361 if (!BP_NOMCP(bp)) {
1362 bnx2x_acquire_phy_lock(bp);
1363 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1365 bnx2x_release_phy_lock(bp);
1367 BNX2X_ERR("Bootcode is missing - can not test link\n");
/*
 * bnx2x_init_port_minmax - initialize the per-port rate-shaping and fairness
 * contexts (bp->cmng) from the current line speed.
 *
 * r_param = line_speed / 8: with line_speed in Mbps this is the port rate in
 * bytes per microsecond (NOTE(review): units inferred from the "bytes/msec"
 * comment below - confirm).  All timer values are converted to SDM ticks of
 * 4 usec each.
 */
1372 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1374 u32 r_param = bp->link_vars.line_speed / 8;
1375 u32 fair_periodic_timeout_usec;
1378 memset(&(bp->cmng.rs_vars), 0,
1379 sizeof(struct rate_shaping_vars_per_port));
1380 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1382 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1383 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1385 /* this is the threshold below which no timer arming will occur
1386 1.25 coefficient is for the threshold to be a little bigger
1387 than the real time, to compensate for timer in-accuracy */
1388 bp->cmng.rs_vars.rs_threshold =
1389 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1391 /* resolution of fairness timer */
1392 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1393 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1394 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1396 /* this is the threshold below which we won't arm the timer anymore */
1397 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1399 /* we multiply by 1e3/8 to get bytes/msec.
1400 We don't want the credits to pass a credit
1401 of the t_fair*FAIR_MEM (algorithm resolution) */
1402 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1403 /* since each tick is 4 usec */
1404 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1407 /* Calculates the sum of vn_min_rates.
1408 It's needed for further normalizing of the min_rates.
1410 sum of vn_min_rates.
1412 0 - if all the min_rates are 0.
1413 In the latter case the fairness algorithm should be deactivated.
1414 If not all min_rates are zero then those that are zeroes will be set to 1.
/*
 * bnx2x_calc_vn_weight_sum - sum the configured minimum rates of all
 * (non-hidden) virtual NICs on this port into bp->vn_weight_sum, used later
 * to normalize the fairness credits.  Zero min rates are treated as
 * DEF_MIN_RATE; when every VN's min rate is zero the per-port fairness
 * algorithm is disabled via bp->cmng.flags (the guarding condition is
 * elided in this listing).
 */
1416 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1419 int port = BP_PORT(bp);
1422 bp->vn_weight_sum = 0;
1423 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
/* E1H function numbering: func = 2*vn + port */
1424 int func = 2*vn + port;
1425 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1426 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1427 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1429 /* Skip hidden vns */
1430 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1433 /* If min rate is zero - set it to 1 */
1435 vn_min_rate = DEF_MIN_RATE;
1439 bp->vn_weight_sum += vn_min_rate;
1442 /* ... only if all min rates are zeros - disable fairness */
1444 bp->cmng.flags.cmng_enables &=
1445 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1446 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1447 " fairness will be disabled\n");
1449 bp->cmng.flags.cmng_enables |=
1450 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
/*
 * bnx2x_init_vn_minmax - compute the per-VN rate-shaping and fairness
 * contexts for function @func from the shmem multi-function config and
 * store them word-by-word into XSTORM internal memory.
 *
 * Hidden functions get min = max = 0 (assignment elided in this listing).
 * The fairness credit delta is only computed when bp->vn_weight_sum is
 * non-zero, i.e. fairness is active.
 */
1453 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1455 struct rate_shaping_vars_per_vn m_rs_vn;
1456 struct fairness_vars_per_vn m_fair_vn;
1457 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1458 u16 vn_min_rate, vn_max_rate;
1461 /* If function is hidden - set min and max to zeroes */
1462 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
/* min/max bandwidth fields are in units of 100 Mbps */
1467 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1468 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1469 /* If min rate is zero - set it to 1 */
1471 vn_min_rate = DEF_MIN_RATE;
1472 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1473 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1476 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1477 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1479 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1480 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1482 /* global vn counter - maximal Mbps for this vn */
1483 m_rs_vn.vn_counter.rate = vn_max_rate;
1485 /* quota - number of bytes transmitted in this period */
1486 m_rs_vn.vn_counter.quota =
1487 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1489 if (bp->vn_weight_sum) {
1490 /* credit for each period of the fairness algorithm:
1491 number of bytes in T_FAIR (the vn share the port rate).
1492 vn_weight_sum should not be larger than 10000, thus
1493 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1495 m_fair_vn.vn_credit_delta =
1496 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1497 (8 * bp->vn_weight_sum))),
1498 (bp->cmng.fair_vars.fair_threshold * 2));
1499 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1500 m_fair_vn.vn_credit_delta);
1503 /* Store it to internal memory */
1504 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1505 REG_WR(bp, BAR_XSTRORM_INTMEM +
1506 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1507 ((u32 *)(&m_rs_vn))[i]);
1509 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1510 REG_WR(bp, BAR_XSTRORM_INTMEM +
1511 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1512 ((u32 *)(&m_fair_vn))[i]);
1516 /* This function is called upon link interrupt */
/*
 * bnx2x_link_attn - handle a NIG link-change attention: refresh link state,
 * program dropless flow control (E1H), reset stale BMAC statistics on link
 * up, report the link only when its status actually changed, and - when this
 * function is the PMF - notify sibling functions and refresh the per-port
 * min/max (cmng) contexts in XSTORM memory.
 */
1517 static void bnx2x_link_attn(struct bnx2x *bp)
1519 u32 prev_link_status = bp->link_vars.link_status;
1520 /* Make sure that we are synced with the current statistics */
1521 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1523 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1525 if (bp->link_vars.link_up) {
1527 /* dropless flow control */
1528 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1529 int port = BP_PORT(bp);
1530 u32 pause_enabled = 0;
1532 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
/* tell the USTORM firmware whether TX pause is active */
1535 REG_WR(bp, BAR_USTRORM_INTMEM +
1536 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1540 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1541 struct host_port_stats *pstats;
1543 pstats = bnx2x_sp(bp, port_stats);
1544 /* reset old bmac stats */
1545 memset(&(pstats->mac_stx[0]), 0,
1546 sizeof(struct mac_stx));
1548 if (bp->state == BNX2X_STATE_OPEN)
1549 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1552 /* indicate link status only if link status actually changed */
1553 if (prev_link_status != bp->link_vars.link_status)
1554 bnx2x_link_report(bp);
/* PMF-only section follows (guard elided in this listing) */
1557 int port = BP_PORT(bp);
1561 /* Set the attention towards other drivers on the same port */
1562 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1563 if (vn == BP_E1HVN(bp))
1566 func = ((vn << 1) | port);
1567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1568 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1571 if (bp->link_vars.link_up) {
1574 /* Init rate shaping and fairness contexts */
1575 bnx2x_init_port_minmax(bp);
1577 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1578 bnx2x_init_vn_minmax(bp, 2*vn + port);
1580 /* Store it to internal memory */
1582 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1583 REG_WR(bp, BAR_XSTRORM_INTMEM +
1584 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1585 ((u32 *)(&bp->cmng))[i]);
/*
 * bnx2x__link_status_update - re-read link status from the link layer and
 * resync statistics, VN weights, and the reported link state.  Skipped when
 * the device is not fully open or the function is disabled (MF_FUNC_DIS).
 */
1590 void bnx2x__link_status_update(struct bnx2x *bp)
1592 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1595 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1597 if (bp->link_vars.link_up)
1598 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1600 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1602 bnx2x_calc_vn_weight_sum(bp);
1604 /* indicate link status */
1605 bnx2x_link_report(bp);
/*
 * bnx2x_pmf_update - take over the Port Management Function role: enable the
 * NIG attention bit for this VN in the HC trailing/leading edge registers
 * and kick the statistics state machine.
 */
1608 static void bnx2x_pmf_update(struct bnx2x *bp)
1610 int port = BP_PORT(bp);
1614 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1616 /* enable nig attention */
1617 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1618 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1619 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1621 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1629 * General service functions
1632 /* send the MCP a request, block until there is a reply */
/*
 * bnx2x_fw_command - post @command/@param to this function's MCP mailbox
 * and poll (up to 500 iterations of `delay` ms, i.e. ~5 s) for a response
 * carrying the matching sequence number.  Serialized by bp->fw_mb_mutex.
 * On success returns the response masked with FW_MSG_CODE_MASK; the
 * timeout return value is elided in this listing.
 */
1633 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1635 int func = BP_FUNC(bp);
1636 u32 seq = ++bp->fw_seq;
/* emulation/FPGA chips respond more slowly - poll in 100ms steps */
1639 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1641 mutex_lock(&bp->fw_mb_mutex);
1642 SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
1643 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1644 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1647 /* let the FW do its magic ... */
1650 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1652 /* Give the FW up to 5 second (500*10ms) */
1653 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1655 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1656 cnt*delay, rc, seq);
1658 /* is this a reply to our command? */
1659 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1660 rc &= FW_MSG_CODE_MASK;
1663 BNX2X_ERR("FW failed to respond!\n");
1667 mutex_unlock(&bp->fw_mb_mutex);
/*
 * bnx2x_e1h_disable - quiesce this E1H function: stop the TX queues,
 * disable its LLH entry in the NIG so no more traffic is classified to it,
 * and drop the carrier.
 */
1672 static void bnx2x_e1h_disable(struct bnx2x *bp)
1674 int port = BP_PORT(bp);
1676 netif_tx_disable(bp->dev);
1678 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1680 netif_carrier_off(bp->dev);
/*
 * bnx2x_e1h_enable - re-enable a previously disabled E1H function: restore
 * its LLH entry in the NIG and wake the TX queues.  Carrier is deliberately
 * left to the link-state path (see comment below).
 */
1683 static void bnx2x_e1h_enable(struct bnx2x *bp)
1685 int port = BP_PORT(bp);
1687 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1689 /* Tx queue should be only reenabled */
1690 netif_tx_wake_all_queues(bp->dev);
1693 * Should not call netif_carrier_on since it will be called if the link
1694 * is up when checking for link state
/*
 * bnx2x_update_min_max - recompute the port and per-VN rate-shaping /
 * fairness contexts (after a bandwidth-allocation change), signal the other
 * functions on the port via the LINK_SYNC general attention, and write the
 * refreshed cmng struct into XSTORM internal memory.
 */
1698 static void bnx2x_update_min_max(struct bnx2x *bp)
1700 int port = BP_PORT(bp);
1703 /* Init rate shaping and fairness contexts */
1704 bnx2x_init_port_minmax(bp);
1706 bnx2x_calc_vn_weight_sum(bp);
1708 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1709 bnx2x_init_vn_minmax(bp, 2*vn + port);
1714 /* Set the attention towards other drivers on the same port */
1715 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1716 if (vn == BP_E1HVN(bp))
1719 func = ((vn << 1) | port);
1720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1721 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1724 /* Store it to internal memory */
1725 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1726 REG_WR(bp, BAR_XSTRORM_INTMEM +
1727 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1728 ((u32 *)(&bp->cmng))[i]);
/*
 * bnx2x_dcc_event - handle a Dynamic Control Command event from the MCP:
 * enable/disable this PF and/or refresh bandwidth allocation, then ack the
 * MCP with DCC_OK or DCC_FAILURE (failure when unhandled event bits remain;
 * the check itself is elided in this listing).
 */
1732 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1734 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1736 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1739 * This is the only place besides the function initialization
1740 * where the bp->flags can change so it is done without any
1743 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1744 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1745 bp->flags |= MF_FUNC_DIS;
1747 bnx2x_e1h_disable(bp);
1749 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1750 bp->flags &= ~MF_FUNC_DIS;
1752 bnx2x_e1h_enable(bp);
/* mark this event as handled */
1754 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1756 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1758 bnx2x_update_min_max(bp);
1759 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1762 /* Report results to MCP */
1764 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
1766 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
1769 /* must be called under the spq lock */
/*
 * bnx2x_sp_get_next - return the current slow-path queue producer BD and
 * advance the producer, wrapping to the start of the ring when the last BD
 * is reached.  (Advance/return statements are elided in this listing.)
 */
1770 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1772 struct eth_spe *next_spe = bp->spq_prod_bd;
1774 if (bp->spq_prod_bd == bp->spq_last_bd) {
1775 bp->spq_prod_bd = bp->spq;
1776 bp->spq_prod_idx = 0;
1777 DP(NETIF_MSG_TIMER, "end of spq\n");
1785 /* must be called under the spq lock */
/*
 * bnx2x_sp_prod_update - publish the new slow-path producer index to the
 * XSTORM after a memory barrier (barrier elided in this listing) so the
 * chip sees the BD contents before the producer update.
 */
1786 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1788 int func = BP_FUNC(bp);
1790 /* Make sure that BD data is updated before writing the producer */
1793 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1798 /* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * bnx2x_sp_post - place one slow-path element (ramrod) on the SPQ under
 * bp->spq_lock and ring the producer.  Fails (return path elided) when the
 * queue is full.  @common marks a per-port rather than per-connection
 * ramrod via the SPE header type.
 */
1799 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1800 u32 data_hi, u32 data_lo, int common)
1802 struct eth_spe *spe;
1804 #ifdef BNX2X_STOP_ON_ERROR
1805 if (unlikely(bp->panic))
1809 spin_lock_bh(&bp->spq_lock);
1811 if (!bp->spq_left) {
1812 BNX2X_ERR("BUG! SPQ ring full!\n");
1813 spin_unlock_bh(&bp->spq_lock);
1818 spe = bnx2x_sp_get_next(bp);
1820 /* CID needs port number to be encoded in it */
1821 spe->hdr.conn_and_cmd_data =
1822 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1824 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1827 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1829 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1830 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1834 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1835 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1836 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1837 (u32)(U64_LO(bp->spq_mapping) +
1838 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1839 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1841 bnx2x_sp_prod_update(bp);
1842 spin_unlock_bh(&bp->spq_lock);
1846 /* acquire split MCP access lock register */
/*
 * bnx2x_acquire_alr - spin (up to 1000 attempts, delay elided) on the
 * MCP access-lock register at GRCBASE_MCP+0x9c until bit 31 reads back set.
 * Logs an error and returns a failure code (elided) on timeout.
 */
1847 static int bnx2x_acquire_alr(struct bnx2x *bp)
1853 for (j = 0; j < 1000; j++) {
1855 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1856 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1857 if (val & (1L << 31))
1862 if (!(val & (1L << 31))) {
1863 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1870 /* release split MCP access lock register */
/* bnx2x_release_alr - clear the MCP access-lock register taken by
 * bnx2x_acquire_alr(). */
1871 static void bnx2x_release_alr(struct bnx2x *bp)
1873 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
/*
 * bnx2x_update_dsb_idx - snapshot the default status block indices written
 * by the chip (attention + per-storm c/u/x/t) into bp, returning a bitmask
 * of which ones changed (mask accumulation and return are elided in this
 * listing).
 */
1876 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1878 struct host_def_status_block *def_sb = bp->def_status_blk;
1881 barrier(); /* status block is written to by the chip */
1882 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1883 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1886 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1887 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1890 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1891 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1894 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1895 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1898 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1899 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1906 * slow path service functions
/*
 * bnx2x_attn_int_asserted - process newly asserted attention bits:
 * mask them in the AEU (under the per-port HW lock), record them in
 * bp->attn_state, dispatch hard-wired sources (NIG link change with the
 * NIG interrupt temporarily masked, SW timers, GPIOs, general attentions
 * 1-6 which are acked by clearing their AEU registers), then ack the bits
 * toward the HC and restore the NIG mask.
 */
1909 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1911 int port = BP_PORT(bp);
1912 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1913 COMMAND_REG_ATTN_BITS_SET);
1914 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1915 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1916 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1917 NIG_REG_MASK_INTERRUPT_PORT0;
/* an already-set bit re-asserting indicates an IGU inconsistency */
1921 if (bp->attn_state & asserted)
1922 BNX2X_ERR("IGU ERROR\n");
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1925 aeu_mask = REG_RD(bp, aeu_addr);
1927 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1928 aeu_mask, asserted);
1929 aeu_mask &= ~(asserted & 0x3ff);
1930 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1932 REG_WR(bp, aeu_addr, aeu_mask);
1933 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1935 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1936 bp->attn_state |= asserted;
1937 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1939 if (asserted & ATTN_HARD_WIRED_MASK) {
1940 if (asserted & ATTN_NIG_FOR_FUNC) {
1942 bnx2x_acquire_phy_lock(bp);
1944 /* save nig interrupt mask */
1945 nig_mask = REG_RD(bp, nig_int_mask_addr);
1946 REG_WR(bp, nig_int_mask_addr, 0);
1948 bnx2x_link_attn(bp);
1950 /* handle unicore attn? */
1952 if (asserted & ATTN_SW_TIMER_4_FUNC)
1953 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1955 if (asserted & GPIO_2_FUNC)
1956 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1958 if (asserted & GPIO_3_FUNC)
1959 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1961 if (asserted & GPIO_4_FUNC)
1962 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1965 if (asserted & ATTN_GENERAL_ATTN_1) {
1966 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1967 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1969 if (asserted & ATTN_GENERAL_ATTN_2) {
1970 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1971 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1973 if (asserted & ATTN_GENERAL_ATTN_3) {
1974 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1975 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1978 if (asserted & ATTN_GENERAL_ATTN_4) {
1979 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1980 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1982 if (asserted & ATTN_GENERAL_ATTN_5) {
1983 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1984 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1986 if (asserted & ATTN_GENERAL_ATTN_6) {
1987 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1988 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1992 } /* if hardwired */
1994 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1996 REG_WR(bp, hc_addr, asserted);
1998 /* now set back the mask */
1999 if (asserted & ATTN_NIG_FOR_FUNC) {
2000 REG_WR(bp, nig_int_mask_addr, nig_mask);
2001 bnx2x_release_phy_lock(bp);
/*
 * bnx2x_fan_failure - record a fan failure by overwriting the external PHY
 * type in the shmem port config with the FAILURE marker (so it survives a
 * driver reload) and log a prominent shutdown warning.
 */
2005 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2007 int port = BP_PORT(bp);
2009 /* mark the failure */
2012 dev_info.port_hw_config[port].external_phy_config);
2014 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2015 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2016 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2019 /* log the failure */
2020 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2021 " the driver to shutdown the card to prevent permanent"
2022 " damage. Please contact OEM Support for assistance\n");
/*
 * bnx2x_attn_int_deasserted0 - handle group-0 deasserted attentions:
 * SPIO5 (fan failure - mask it in the AEU, reset the PHY, record the
 * failure), GPIO3 module-detect interrupts, and fatal HW-block attentions
 * which are masked and reported.
 */
2025 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2027 int port = BP_PORT(bp);
2031 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2034 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2036 val = REG_RD(bp, reg_offset);
2037 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2038 REG_WR(bp, reg_offset, val);
2040 BNX2X_ERR("SPIO5 hw attention\n");
2042 /* Fan failure attention */
2043 bnx2x_hw_reset_phy(&bp->link_params);
2044 bnx2x_fan_failure(bp);
2047 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2048 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2049 bnx2x_acquire_phy_lock(bp);
2050 bnx2x_handle_module_detect_int(&bp->link_params);
2051 bnx2x_release_phy_lock(bp);
2054 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2056 val = REG_RD(bp, reg_offset);
2057 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2058 REG_WR(bp, reg_offset, val);
2060 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2061 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
/*
 * bnx2x_attn_int_deasserted1 - handle group-1 deasserted attentions:
 * doorbell-queue (DORQ) interrupts (read-to-clear the status register and
 * report a fatal discard) and fatal HW-block set-1 attentions, which are
 * masked in the AEU and reported.
 */
2066 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2070 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2072 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2073 BNX2X_ERR("DB hw attention 0x%x\n", val);
2074 /* DORQ discard attention */
2076 BNX2X_ERR("FATAL error from DORQ\n");
2079 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2081 int port = BP_PORT(bp);
2084 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2085 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2087 val = REG_RD(bp, reg_offset);
2088 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2089 REG_WR(bp, reg_offset, val);
2091 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2092 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
/*
 * bnx2x_attn_int_deasserted2 - handle group-2 deasserted attentions:
 * CFC and PXP block interrupts (read-to-clear, report fatal conditions)
 * and fatal HW-block set-2 attentions, masked in the AEU and reported.
 */
2097 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2101 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2103 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2104 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2105 /* CFC error attention */
2107 BNX2X_ERR("FATAL error from CFC\n");
2110 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2112 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2113 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2114 /* RQ_USDMDP_FIFO_OVERFLOW */
2116 BNX2X_ERR("FATAL error from PXP\n");
2119 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2121 int port = BP_PORT(bp);
2124 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2125 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2127 val = REG_RD(bp, reg_offset);
2128 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2129 REG_WR(bp, reg_offset, val);
2131 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2132 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
/*
 * bnx2x_attn_int_deasserted3 - handle group-3 deasserted attentions:
 * the PMF link-sync general attention (re-read MF config, dispatch DCC
 * events, refresh link status, possibly become PMF), microcode/MCP
 * asserts (acked by clearing their general-attention registers), and
 * latched attentions (GRC timeout / reserved), acked via
 * MISC_REG_AEU_CLR_LATCH_SIGNAL.
 */
2137 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2141 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2143 if (attn & BNX2X_PMF_LINK_ASSERT) {
2144 int func = BP_FUNC(bp);
2146 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2147 bp->mf_config = SHMEM_RD(bp,
2148 mf_cfg.func_mf_config[func].config);
2149 val = SHMEM_RD(bp, func_mb[func].drv_status);
2150 if (val & DRV_STATUS_DCC_EVENT_MASK)
2152 (val & DRV_STATUS_DCC_EVENT_MASK));
2153 bnx2x__link_status_update(bp);
2154 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2155 bnx2x_pmf_update(bp);
2157 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2159 BNX2X_ERR("MC assert!\n");
2160 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2161 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2162 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2163 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2166 } else if (attn & BNX2X_MCP_ASSERT) {
2168 BNX2X_ERR("MCP assert!\n");
2169 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2173 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2176 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2177 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2178 if (attn & BNX2X_GRC_TIMEOUT) {
2179 val = CHIP_IS_E1H(bp) ?
2180 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2181 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2183 if (attn & BNX2X_GRC_RSV) {
2184 val = CHIP_IS_E1H(bp) ?
2185 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2186 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2188 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
/* Recovery bookkeeping kept in a MISC "generic" register so it survives a
 * function reset and is visible to all driver instances on the chip: the
 * low LOAD_COUNTER_BITS hold the count of loaded functions, the remaining
 * bits flag a reset in progress. */
2192 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2193 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2194 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2195 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2196 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2197 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2199 * should be run under rtnl lock
/* bnx2x_set_reset_done - clear the reset-in-progress flag in the shared
 * generic register; must run under rtnl lock (per the comment fragments
 * around these helpers). */
2201 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2203 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2204 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2205 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2211 * should be run under rtnl lock
/* bnx2x_set_reset_in_progress - set the reset-in-progress flag in the
 * shared generic register (flag OR is elided in this listing); must run
 * under rtnl lock. */
2213 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2215 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2217 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2223 * should be run under rtnl lock
/* bnx2x_reset_is_done - true when no reset-in-progress flag bits are set
 * in the shared generic register; must run under rtnl lock. */
2225 bool bnx2x_reset_is_done(struct bnx2x *bp)
2227 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2228 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2229 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2233 * should be run under rtnl lock
/* bnx2x_inc_load_cnt - increment the shared load counter (low 16 bits of
 * the generic register) preserving the flag bits; wraps modulo the counter
 * width. Must run under rtnl lock. */
2235 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2237 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2239 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2241 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2242 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2248 * should be run under rtnl lock
/* bnx2x_dec_load_cnt - decrement the shared load counter preserving the
 * flag bits; return value (the new count, per the u32 return type) is
 * elided in this listing. Must run under rtnl lock. */
2250 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2252 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2254 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2256 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2257 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2265 * should be run under rtnl lock
/* bnx2x_get_load_cnt - read the shared load counter; must run under rtnl
 * lock. */
2267 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2269 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
/* bnx2x_clear_load_cnt - zero the shared load counter, preserving the
 * reset-flag bits. */
2272 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2274 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2275 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2278 static inline void _print_next_block(int idx, const char *blk)
/*
 * bnx2x_print_blocks_with_parity0 - walk the set bits of parity signature
 * word 0 (@sig, shifted out as the loop consumes it - shift elided in this
 * listing), printing the name of each block reporting a parity error.
 * Returns the updated running item count @par_num.
 */
2285 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2289 for (i = 0; sig; i++) {
2290 cur_bit = ((u32)0x1 << i);
2291 if (sig & cur_bit) {
2293 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2294 _print_next_block(par_num++, "BRB");
2296 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2297 _print_next_block(par_num++, "PARSER");
2299 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2300 _print_next_block(par_num++, "TSDM");
2302 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2303 _print_next_block(par_num++, "SEARCHER");
2305 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2306 _print_next_block(par_num++, "TSEMI");
/*
 * bnx2x_print_blocks_with_parity1 - same as parity0 but for signature
 * word 1 (PBCLIENT/QM/XSDM/... blocks).  Returns the updated running
 * item count @par_num.
 */
2318 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2322 for (i = 0; sig; i++) {
2323 cur_bit = ((u32)0x1 << i);
2324 if (sig & cur_bit) {
2326 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2327 _print_next_block(par_num++, "PBCLIENT");
2329 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2330 _print_next_block(par_num++, "QM");
2332 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2333 _print_next_block(par_num++, "XSDM");
2335 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2336 _print_next_block(par_num++, "XSEMI");
2338 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2339 _print_next_block(par_num++, "DOORBELLQ");
2341 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2342 _print_next_block(par_num++, "VAUX PCI CORE");
2344 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2345 _print_next_block(par_num++, "DEBUG");
2347 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2348 _print_next_block(par_num++, "USDM");
2350 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2351 _print_next_block(par_num++, "USEMI");
2353 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2354 _print_next_block(par_num++, "UPB");
2356 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2357 _print_next_block(par_num++, "CSDM");
/*
 * bnx2x_print_blocks_with_parity2 - same as parity0 but for signature
 * word 2 (CSEMI/PXP/CFC/CDU/IGU/MISC blocks).  Returns the updated
 * running item count @par_num.
 */
2369 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2373 for (i = 0; sig; i++) {
2374 cur_bit = ((u32)0x1 << i);
2375 if (sig & cur_bit) {
2377 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2378 _print_next_block(par_num++, "CSEMI");
2380 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2381 _print_next_block(par_num++, "PXP");
2383 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2384 _print_next_block(par_num++,
2385 "PXPPCICLOCKCLIENT");
2387 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2388 _print_next_block(par_num++, "CFC");
2390 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2391 _print_next_block(par_num++, "CDU");
2393 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2394 _print_next_block(par_num++, "IGU");
2396 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2397 _print_next_block(par_num++, "MISC");
/*
 * bnx2x_print_blocks_with_parity3 - same as parity0 but for signature
 * word 3 (MCP latched parity sources).  Returns the updated running item
 * count @par_num.
 */
2409 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2413 for (i = 0; sig; i++) {
2414 cur_bit = ((u32)0x1 << i);
2415 if (sig & cur_bit) {
2417 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2418 _print_next_block(par_num++, "MCP ROM");
2420 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2421 _print_next_block(par_num++, "MCP UMP RX");
2423 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2424 _print_next_block(par_num++, "MCP UMP TX");
2426 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2427 _print_next_block(par_num++, "MCP SCPAD");
/*
 * bnx2x_parity_attn - given the four after-invert attention signature
 * words, detect whether any HW-block parity bits are set; if so, print the
 * full list of affected blocks and report true (return statements are
 * elided in this listing).
 */
2439 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2442 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2443 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2445 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2446 "[0]:0x%08x [1]:0x%08x "
2447 "[2]:0x%08x [3]:0x%08x\n",
2448 sig0 & HW_PRTY_ASSERT_SET_0,
2449 sig1 & HW_PRTY_ASSERT_SET_1,
2450 sig2 & HW_PRTY_ASSERT_SET_2,
2451 sig3 & HW_PRTY_ASSERT_SET_3);
2452 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2454 par_num = bnx2x_print_blocks_with_parity0(
2455 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2456 par_num = bnx2x_print_blocks_with_parity1(
2457 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2458 par_num = bnx2x_print_blocks_with_parity2(
2459 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2460 par_num = bnx2x_print_blocks_with_parity3(
2461 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
/*
 * bnx2x_chk_parity_attn - read the four after-invert AEU attention words
 * for this port and hand them to bnx2x_parity_attn() to decide whether a
 * parity error occurred.
 */
2468 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2470 struct attn_route attn;
2471 int port = BP_PORT(bp);
2473 attn.sig[0] = REG_RD(bp,
2474 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2476 attn.sig[1] = REG_RD(bp,
2477 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2479 attn.sig[2] = REG_RD(bp,
2480 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2482 attn.sig[3] = REG_RD(bp,
2483 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2486 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
/*
 * bnx2x_attn_int_deasserted - process deasserted attention bits under the
 * split-MCP access lock (ALR).  On a parity error, start the recovery
 * sequence (reset task), disable interrupts and bail out so sibling
 * functions also observe the parity.  Otherwise dispatch each asserted
 * dynamic attention group to the per-group handlers, ack the bits toward
 * the HC, unmask them again in the AEU (under the per-port HW lock), and
 * clear them from bp->attn_state.
 */
2490 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2492 struct attn_route attn, *group_mask;
2493 int port = BP_PORT(bp);
2499 /* need to take HW lock because MCP or other port might also
2500 try to handle this event */
2501 bnx2x_acquire_alr(bp);
2503 if (bnx2x_chk_parity_attn(bp)) {
2504 bp->recovery_state = BNX2X_RECOVERY_INIT;
2505 bnx2x_set_reset_in_progress(bp);
2506 schedule_delayed_work(&bp->reset_task, 0);
2507 /* Disable HW interrupts */
2508 bnx2x_int_disable(bp);
2509 bnx2x_release_alr(bp);
2510 /* In case of parity errors don't handle attentions so that
2511 * other function would "see" parity errors.
2516 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2517 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2518 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2519 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2520 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2521 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2523 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2524 if (deasserted & (1 << index)) {
2525 group_mask = &bp->attn_group[index];
2527 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2528 index, group_mask->sig[0], group_mask->sig[1],
2529 group_mask->sig[2], group_mask->sig[3]);
2531 bnx2x_attn_int_deasserted3(bp,
2532 attn.sig[3] & group_mask->sig[3]);
2533 bnx2x_attn_int_deasserted1(bp,
2534 attn.sig[1] & group_mask->sig[1]);
2535 bnx2x_attn_int_deasserted2(bp,
2536 attn.sig[2] & group_mask->sig[2]);
2537 bnx2x_attn_int_deasserted0(bp,
2538 attn.sig[0] & group_mask->sig[0]);
2542 bnx2x_release_alr(bp);
2544 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2547 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549 REG_WR(bp, reg_addr, val);
/* clearing a bit that was never recorded as set is an IGU inconsistency */
2551 if (~bp->attn_state & deasserted)
2552 BNX2X_ERR("IGU ERROR\n");
2554 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2555 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2557 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2558 aeu_mask = REG_RD(bp, reg_addr);
2560 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2561 aeu_mask, deasserted);
2562 aeu_mask |= (deasserted & 0x3ff);
2563 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2565 REG_WR(bp, reg_addr, aeu_mask);
2566 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2568 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2569 bp->attn_state &= ~deasserted;
2570 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/*
 * bnx2x_attn_int - compute which attention bits changed since the last
 * visit by comparing the status-block bits, their acks, and the driver's
 * attn_state shadow, then route newly raised bits to the asserted handler
 * and newly cleared bits to the deasserted handler.
 *
 * NOTE(review): continuation lines of the two le32_to_cpu() reads and the
 * guarding "if (asserted)/if (deasserted)" lines are missing from this
 * extract.
 */
2573 static void bnx2x_attn_int(struct bnx2x *bp)
2575 /* read local copy of bits */
2576 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2578 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2580 u32 attn_state = bp->attn_state;
2582 /* look for changed bits */
2583 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2584 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2587 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2588 attn_bits, attn_ack, asserted, deasserted);
/* bits equal in (bits,ack) but differing from state indicate a tracking bug */
2590 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2591 BNX2X_ERR("BAD attention state\n");
2593 /* handle bits that were raised */
2595 bnx2x_attn_int_asserted(bp, asserted);
/* handle bits that were cleared */
2598 bnx2x_attn_int_deasserted(bp, deasserted);
/*
 * bnx2x_sp_task - slow-path deferred work handler: bails out while
 * interrupts are disabled (intr_sem held), refreshes the default
 * status-block indices, services the pending event classes, and finally
 * acks all five default status-block segments back to the IGU.
 *
 * NOTE(review): several lines are missing here (the status-bit tests
 * around the attention/STAT_QUERY handling and the trailing arguments of
 * each bnx2x_ack_sb() call are not visible).
 */
2601 static void bnx2x_sp_task(struct work_struct *work)
2603 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2606 /* Return here if interrupt is disabled */
2607 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2608 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* Pull fresh indices from the default status block; non-zero status
 * means at least one segment changed */
2612 status = bnx2x_update_dsb_idx(bp);
2613 /* if (status == 0) */
2614 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2616 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2624 /* CStorm events: STAT_QUERY */
2626 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
/* Any status bits left unhandled at this point are unexpected */
2630 if (unlikely(status))
2631 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
/* Ack every default-SB segment with its latest index */
2634 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2636 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2638 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2640 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2642 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/*
 * bnx2x_msix_sp_int - MSI-X slow-path interrupt handler: disables further
 * IGU interrupts for the default SB, notifies a registered CNIC handler
 * (if any) under RCU, and defers the real work to bnx2x_sp_task via the
 * driver workqueue.
 *
 * NOTE(review): return statements, the CNIC #ifdef boundaries, and the
 * rcu_read_lock/unlock lines are missing from this extract.
 */
2646 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2648 struct net_device *dev = dev_instance;
2649 struct bnx2x *bp = netdev_priv(dev);
2651 /* Return here if interrupt is disabled */
2652 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2653 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
/* Mask further default-SB interrupts until the work item runs */
2657 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2659 #ifdef BNX2X_STOP_ON_ERROR
2660 if (unlikely(bp->panic))
/* Give CNIC (iSCSI/FCoE offload) a chance to handle the event */
2666 struct cnic_ops *c_ops;
2669 c_ops = rcu_dereference(bp->cnic_ops);
2671 c_ops->cnic_handler(bp->cnic_data, NULL);
/* Defer slow-path processing to process context */
2675 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2680 /* end of slow path */
/*
 * bnx2x_timer - periodic driver timer: polls queue 0 when applicable,
 * exchanges the heartbeat pulse with the MCP firmware (flagging a lost
 * heartbeat if driver and MCP sequence numbers diverge by more than 1),
 * triggers a statistics update while the device is OPEN, and re-arms
 * itself.
 *
 * NOTE(review): several lines are missing from this extract (the early
 * "goto timer_restart" paths, the poll-mode condition guarding the fp[0]
 * poll, and local declarations of rc/drv_pulse/mcp_pulse).
 */
2682 static void bnx2x_timer(unsigned long data)
2684 struct bnx2x *bp = (struct bnx2x *) data;
/* Nothing to do when the interface is down */
2686 if (!netif_running(bp->dev))
/* Skip the tick while interrupts are disabled */
2689 if (atomic_read(&bp->intr_sem) != 0)
2693 struct bnx2x_fastpath *fp = &bp->fp[0];
2697 rc = bnx2x_rx_int(fp, 1000);
/* Heartbeat exchange with management firmware (skipped when no MCP) */
2700 if (!BP_NOMCP(bp)) {
2701 int func = BP_FUNC(bp);
2705 ++bp->fw_drv_pulse_wr_seq;
2706 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2707 /* TBD - add SYSTEM_TIME */
2708 drv_pulse = bp->fw_drv_pulse_wr_seq;
2709 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2711 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2712 MCP_PULSE_SEQ_MASK);
2713 /* The delta between driver pulse and mcp response
2714 * should be 1 (before mcp response) or 0 (after mcp response)
2716 if ((drv_pulse != mcp_pulse) &&
2717 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2718 /* someone lost a heartbeat... */
2719 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2720 drv_pulse, mcp_pulse);
2724 if (bp->state == BNX2X_STATE_OPEN)
2725 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
/* Re-arm for the next interval */
2728 mod_timer(&bp->timer, jiffies + bp->current_interval);
2731 /* end of Statistics */
2736 * nic init service functions
/*
 * bnx2x_zero_sb - clear the USTORM and CSTORM halves of one non-default
 * status block in CSEM fast memory (sizes are in bytes, filled as
 * 32-bit words, hence the "/ 4").
 */
2739 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2741 int port = BP_PORT(bp);
2744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2746 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2747 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2748 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2749 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
/*
 * bnx2x_init_sb - program one fast-path status block into firmware
 * memory: for both the USTORM and CSTORM sections, write the host DMA
 * address (low then high 32 bits), the owning function id, and disable
 * host coalescing on every index; then enable IGU interrupts for this SB.
 *
 * @mapping: host DMA address of the status block.
 * @sb_id:   hardware status-block id to program.
 *
 * NOTE(review): the offsetof() continuation lines naming the section
 * fields and the U64_HI() writes are missing from this extract.
 */
2752 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2753 dma_addr_t mapping, int sb_id)
2755 int port = BP_PORT(bp);
2756 int func = BP_FUNC(bp);
/* USTORM section: DMA address, function ownership, HC-disable per index */
2761 section = ((u64)mapping) + offsetof(struct host_status_block,
2763 sb->u_status_block.status_block_id = sb_id;
2765 REG_WR(bp, BAR_CSTRORM_INTMEM +
2766 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2767 REG_WR(bp, BAR_CSTRORM_INTMEM +
2768 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2770 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2771 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2773 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2774 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2775 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
/* CSTORM section: same programming for the C-side of the SB */
2778 section = ((u64)mapping) + offsetof(struct host_status_block,
2780 sb->c_status_block.status_block_id = sb_id;
2782 REG_WR(bp, BAR_CSTRORM_INTMEM +
2783 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2784 REG_WR(bp, BAR_CSTRORM_INTMEM +
2785 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2787 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2788 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2790 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2791 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2792 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
/* Re-enable IGU interrupt generation for this status block */
2794 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/*
 * bnx2x_zero_def_sb - clear this function's default status block across
 * all four storm memories (TSTORM, CSTORM-U, CSTORM-C, XSTORM); each
 * fill length is the struct size in 32-bit words.
 */
2797 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2799 int func = BP_FUNC(bp);
2801 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2802 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2803 sizeof(struct tstorm_def_status_block)/4);
2804 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2806 sizeof(struct cstorm_def_status_block_u)/4);
2807 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2808 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2809 sizeof(struct cstorm_def_status_block_c)/4);
2810 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2811 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2812 sizeof(struct xstorm_def_status_block)/4);
/*
 * bnx2x_init_def_sb - program the default (slow-path) status block:
 * set up the attention section (caching the per-group AEU enable masks
 * into bp->attn_group and pointing the HC at the attention section),
 * then program the U/C/T/X per-storm sections with their DMA addresses,
 * owning function, and HC-disable on every index; finally clear the
 * pending flags and enable IGU interrupts.
 *
 * @mapping: host DMA address of the default status block.
 * @sb_id:   status-block id (DEF_SB_ID at the caller).
 *
 * NOTE(review): this extract is missing some lines (U64_HI writes, the
 * HC_REG_ATTN_NUM read-modify detail, and closing braces); comments
 * describe the visible statements only.
 */
2815 static void bnx2x_init_def_sb(struct bnx2x *bp,
2816 struct host_def_status_block *def_sb,
2817 dma_addr_t mapping, int sb_id)
2819 int port = BP_PORT(bp);
2820 int func = BP_FUNC(bp);
2821 int index, val, reg_offset;
/* --- attention section --- */
2825 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2826 atten_status_block);
2827 def_sb->atten_status_block.status_block_id = sb_id;
2831 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2832 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
/* Cache the AEU enable mask of each dynamic attention group; groups are
 * laid out 0x10 bytes apart, four 32-bit signal words per group */
2834 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2835 bp->attn_group[index].sig[0] = REG_RD(bp,
2836 reg_offset + 0x10*index);
2837 bp->attn_group[index].sig[1] = REG_RD(bp,
2838 reg_offset + 0x4 + 0x10*index);
2839 bp->attn_group[index].sig[2] = REG_RD(bp,
2840 reg_offset + 0x8 + 0x10*index);
2841 bp->attn_group[index].sig[3] = REG_RD(bp,
2842 reg_offset + 0xc + 0x10*index);
/* Point the HC at the attention section address (low/high words) */
2845 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2846 HC_REG_ATTN_MSG0_ADDR_L);
2848 REG_WR(bp, reg_offset, U64_LO(section));
2849 REG_WR(bp, reg_offset + 4, U64_HI(section));
2851 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2853 val = REG_RD(bp, reg_offset);
2855 REG_WR(bp, reg_offset, val);
/* --- USTORM section: address, owning function, HC-disable per index --- */
2858 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2859 u_def_status_block);
2860 def_sb->u_def_status_block.status_block_id = sb_id;
2862 REG_WR(bp, BAR_CSTRORM_INTMEM +
2863 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2864 REG_WR(bp, BAR_CSTRORM_INTMEM +
2865 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2867 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2868 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2870 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2871 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2872 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
/* --- CSTORM section --- */
2875 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2876 c_def_status_block);
2877 def_sb->c_def_status_block.status_block_id = sb_id;
2879 REG_WR(bp, BAR_CSTRORM_INTMEM +
2880 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2881 REG_WR(bp, BAR_CSTRORM_INTMEM +
2882 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2884 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2885 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2887 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2888 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2889 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
/* --- TSTORM section --- */
2892 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2893 t_def_status_block);
2894 def_sb->t_def_status_block.status_block_id = sb_id;
2896 REG_WR(bp, BAR_TSTRORM_INTMEM +
2897 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2898 REG_WR(bp, BAR_TSTRORM_INTMEM +
2899 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2901 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2902 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2904 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2905 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2906 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* --- XSTORM section --- */
2909 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2910 x_def_status_block);
2911 def_sb->x_def_status_block.status_block_id = sb_id;
2913 REG_WR(bp, BAR_XSTRORM_INTMEM +
2914 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2915 REG_WR(bp, BAR_XSTRORM_INTMEM +
2916 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2918 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2919 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2921 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2922 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2923 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
/* Reset slow-path pending flags and enable IGU interrupts for this SB */
2925 bp->stats_pending = 0;
2926 bp->set_mac_pending = 0;
2928 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/*
 * bnx2x_update_coalesce - program per-queue interrupt coalescing: for
 * each fast-path SB, write the Rx and Tx timeout values (ticks scaled by
 * 4*BNX2X_BTR) and disable host coalescing on the index whenever the
 * corresponding tick value rounds down to zero.
 */
2931 void bnx2x_update_coalesce(struct bnx2x *bp)
2933 int port = BP_PORT(bp);
2936 for_each_queue(bp, i) {
2937 int sb_id = bp->fp[i].sb_id;
2939 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2940 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2941 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2942 U_SB_ETH_RX_CQ_INDEX),
2943 bp->rx_ticks/(4 * BNX2X_BTR));
/* disable HC on the Rx index when the timeout degenerates to 0 */
2944 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2945 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2946 U_SB_ETH_RX_CQ_INDEX),
2947 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2949 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2950 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2951 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2952 C_SB_ETH_TX_CQ_INDEX),
2953 bp->tx_ticks/(4 * BNX2X_BTR));
/* same zero-timeout rule for the Tx index */
2954 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2955 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2956 C_SB_ETH_TX_CQ_INDEX),
2957 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
/*
 * bnx2x_init_sp_ring - initialize the slow-path queue (SPQ): reset the
 * lock, credit counter, producer index and BD pointers, then tell the
 * XSTORM firmware the SPQ page base address and the producer location.
 *
 * NOTE(review): continuation lines of the last two REG_WR calls are
 * missing from this extract.
 */
2961 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2963 int func = BP_FUNC(bp);
2965 spin_lock_init(&bp->spq_lock);
/* Full credit, producer at slot 0, BD window [spq, spq+MAX_SP_DESC_CNT] */
2967 bp->spq_left = MAX_SPQ_PENDING;
2968 bp->spq_prod_idx = 0;
2969 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2970 bp->spq_prod_bd = bp->spq;
2971 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
/* Publish the SPQ DMA base (low/high) and producer offset to firmware */
2973 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2974 U64_LO(bp->spq_mapping));
2976 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2977 U64_HI(bp->spq_mapping));
2979 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
/*
 * bnx2x_init_context - fill the per-queue Ethernet connection contexts:
 * first pass programs the USTORM (Rx) state - client id, SB id, flags,
 * buffer sizes, BD/SGE page bases, TPA settings - plus the CDU usage
 * fields; second pass programs the CSTORM/XSTORM (Tx) state - Tx CQ
 * index, SB id, Tx BD page base and statistics enable.
 *
 * NOTE(review): some continuation lines (statistics_counter_id value,
 * bd_buff_size value, the sge_buff_size min_t second argument) are
 * missing from this extract.
 */
2983 static void bnx2x_init_context(struct bnx2x *bp)
/* Pass 1: Rx-side (USTORM) context per queue */
2988 for_each_queue(bp, i) {
2989 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2990 struct bnx2x_fastpath *fp = &bp->fp[i];
2991 u8 cl_id = fp->cl_id;
2993 context->ustorm_st_context.common.sb_index_numbers =
2994 BNX2X_RX_SB_INDEX_NUM;
2995 context->ustorm_st_context.common.clientId = cl_id;
2996 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2997 context->ustorm_st_context.common.flags =
2998 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2999 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
3000 context->ustorm_st_context.common.statistics_counter_id =
3002 context->ustorm_st_context.common.mc_alignment_log_size =
3003 BNX2X_RX_ALIGN_SHIFT;
3004 context->ustorm_st_context.common.bd_buff_size =
/* Rx BD ring DMA base, split into high/low 32-bit halves */
3006 context->ustorm_st_context.common.bd_page_base_hi =
3007 U64_HI(fp->rx_desc_mapping);
3008 context->ustorm_st_context.common.bd_page_base_lo =
3009 U64_LO(fp->rx_desc_mapping);
/* TPA (LRO-style aggregation): enable and program the SGE ring */
3010 if (!fp->disable_tpa) {
3011 context->ustorm_st_context.common.flags |=
3012 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
3013 context->ustorm_st_context.common.sge_buff_size =
3014 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
3016 context->ustorm_st_context.common.sge_page_base_hi =
3017 U64_HI(fp->rx_sge_mapping);
3018 context->ustorm_st_context.common.sge_page_base_lo =
3019 U64_LO(fp->rx_sge_mapping);
/* max SGEs per packet: MTU in SGE pages, rounded up to a whole
 * PAGES_PER_SGE multiple */
3021 context->ustorm_st_context.common.max_sges_for_packet =
3022 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3023 context->ustorm_st_context.common.max_sges_for_packet =
3024 ((context->ustorm_st_context.common.
3025 max_sges_for_packet + PAGES_PER_SGE - 1) &
3026 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
/* CDU reserved/usage fields for the U and X aggregation contexts */
3029 context->ustorm_ag_context.cdu_usage =
3030 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3031 CDU_REGION_NUMBER_UCM_AG,
3032 ETH_CONNECTION_TYPE);
3034 context->xstorm_ag_context.cdu_reserved =
3035 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3036 CDU_REGION_NUMBER_XCM_AG,
3037 ETH_CONNECTION_TYPE);
/* Pass 2: Tx-side (CSTORM/XSTORM) context per queue */
3041 for_each_queue(bp, i) {
3042 struct bnx2x_fastpath *fp = &bp->fp[i];
3043 struct eth_context *context =
3044 bnx2x_sp(bp, context[i].eth);
3046 context->cstorm_st_context.sb_index_number =
3047 C_SB_ETH_TX_CQ_INDEX;
3048 context->cstorm_st_context.status_block_id = fp->sb_id;
3050 context->xstorm_st_context.tx_bd_page_base_hi =
3051 U64_HI(fp->tx_desc_mapping);
3052 context->xstorm_st_context.tx_bd_page_base_lo =
3053 U64_LO(fp->tx_desc_mapping);
3054 context->xstorm_st_context.statistics_data = (fp->cl_id |
3055 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
/*
 * bnx2x_init_ind_table - fill the TSTORM RSS indirection table by
 * striping the active queues' client ids round-robin across all table
 * entries; a no-op when RSS is disabled.
 */
3059 static void bnx2x_init_ind_table(struct bnx2x *bp)
3061 int func = BP_FUNC(bp);
/* nothing to program when multi-queue RSS is off */
3064 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3068 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
/* entry i -> client id of queue (i mod num_queues) */
3069 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3070 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3071 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3072 bp->fp->cl_id + (i % bp->num_queues));
/*
 * bnx2x_set_client_config - build a tstorm_eth_client_config (MTU,
 * statistics/E1HOV flags, optional VLAN-stripping) and write it, as two
 * 32-bit words, into TSTORM memory for every queue's client id.
 */
3075 void bnx2x_set_client_config(struct bnx2x *bp)
3077 struct tstorm_eth_client_config tstorm_client = {0};
3078 int port = BP_PORT(bp);
3081 tstorm_client.mtu = bp->dev->mtu;
3082 tstorm_client.config_flags =
3083 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3084 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
/* Enable HW VLAN stripping only when Rx is active, a VLAN group is
 * registered and the HW supports it */
3086 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3087 tstorm_client.config_flags |=
3088 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3089 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
/* Push the (shared) config to each client, word by word */
3093 for_each_queue(bp, i) {
3094 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3096 REG_WR(bp, BAR_TSTRORM_INTMEM +
3097 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3098 ((u32 *)&tstorm_client)[0]);
3099 REG_WR(bp, BAR_TSTRORM_INTMEM +
3100 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3101 ((u32 *)&tstorm_client)[1]);
3104 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3105 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
/*
 * bnx2x_set_storm_rx_mode - translate bp->rx_mode into the TSTORM MAC
 * filter configuration (drop/accept masks for unicast, multicast and
 * broadcast) and the NIG LLH BRB drive mask, then write both to the
 * hardware; finally refresh the per-client config unless Rx is off.
 *
 * NOTE(review): the llh_mask declaration line, the switch(mode) line and
 * break statements are missing from this extract.
 */
3108 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3110 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3111 int mode = bp->rx_mode;
3112 int mask = bp->rx_mode_cl_mask;
3113 int func = BP_FUNC(bp);
3114 int port = BP_PORT(bp);
3116 /* All but management unicast packets should pass to the host as well */
3118 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3119 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3120 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3121 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3123 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3126 case BNX2X_RX_MODE_NONE: /* no Rx */
3127 tstorm_mac_filter.ucast_drop_all = mask;
3128 tstorm_mac_filter.mcast_drop_all = mask;
3129 tstorm_mac_filter.bcast_drop_all = mask;
3132 case BNX2X_RX_MODE_NORMAL:
3133 tstorm_mac_filter.bcast_accept_all = mask;
3136 case BNX2X_RX_MODE_ALLMULTI:
3137 tstorm_mac_filter.mcast_accept_all = mask;
3138 tstorm_mac_filter.bcast_accept_all = mask;
3141 case BNX2X_RX_MODE_PROMISC:
3142 tstorm_mac_filter.ucast_accept_all = mask;
3143 tstorm_mac_filter.mcast_accept_all = mask;
3144 tstorm_mac_filter.bcast_accept_all = mask;
3145 /* pass management unicast packets as well */
3146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3150 BNX2X_ERR("BAD rx mode (%d)\n", mode);
/* Program the NIG drive mask for the right port's LLH */
3155 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
/* Write the whole filter config struct word by word into TSTORM */
3158 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3159 REG_WR(bp, BAR_TSTRORM_INTMEM +
3160 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3161 ((u32 *)&tstorm_mac_filter)[i]);
3163 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3164 ((u32 *)&tstorm_mac_filter)[i]); */
/* keep the per-client config in sync whenever Rx is enabled */
3167 if (mode != BNX2X_RX_MODE_NONE)
3168 bnx2x_set_client_config(bp);
/*
 * bnx2x_init_internal_common - chip-common internal-memory init: zero the
 * USTORM aggregation data area word by word (not covered by the initTool,
 * per the original comment).
 */
3171 static void bnx2x_init_internal_common(struct bnx2x *bp)
3175 /* Zero this manually as its initialization is
3176 currently missing in the initTool */
3177 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3178 REG_WR(bp, BAR_USTRORM_INTMEM +
3179 USTORM_AGG_DATA_OFFSET + i * 4, 0);
/*
 * bnx2x_init_internal_port - per-port internal-memory init: program the
 * BNX2X_BTR baseline timer resolution into the U/C/T/X storm HC_BTR
 * registers for this port.
 *
 * NOTE(review): the REG_WR( opening of the first two writes is on lines
 * missing from this extract.
 */
3182 static void bnx2x_init_internal_port(struct bnx2x *bp)
3184 int port = BP_PORT(bp);
3187 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3189 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3190 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3191 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
/*
 * bnx2x_init_internal_func - per-function internal-memory init:
 *  - program the TSTORM common config (RSS flags/mask, TPA, E1HOV);
 *  - set the initial Rx mode to NONE (no Rx until link up);
 *  - zero the per-client X/T/U-storm statistics areas;
 *  - point all four storms at the fw_stats DMA buffer and enable
 *    statistics collection flags;
 *  - on E1H, publish the function mode and E1HOV setting;
 *  - program per-queue CQE page base/next and max aggregation size;
 *  - on E1H, program dropless-flow-control pause thresholds per queue;
 *  - set up rate shaping / fairness (min-max) when applicable and store
 *    the cmng structure into XSTORM memory.
 *
 * NOTE(review): many lines are missing from this extract (declarations
 * of i/j/offset/max_agg_size, several conditional headers such as the
 * IS_E1HMF checks, and closing braces); comments below track only the
 * visible statements.
 */
3194 static void bnx2x_init_internal_func(struct bnx2x *bp)
3196 struct tstorm_eth_function_common_config tstorm_config = {0};
3197 struct stats_indication_flags stats_flags = {0};
3198 int port = BP_PORT(bp);
3199 int func = BP_FUNC(bp);
/* RSS configuration flags and hash result mask */
3204 tstorm_config.config_flags = RSS_FLAGS(bp);
3207 tstorm_config.rss_result_mask = MULTI_MASK;
3209 /* Enable TPA if needed */
3210 if (bp->flags & TPA_ENABLE_FLAG)
3211 tstorm_config.config_flags |=
3212 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3215 tstorm_config.config_flags |=
3216 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3218 tstorm_config.leading_client_id = BP_L_ID(bp);
3220 REG_WR(bp, BAR_TSTRORM_INTMEM +
3221 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3222 (*(u32 *)&tstorm_config));
3224 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3225 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3226 bnx2x_set_storm_rx_mode(bp);
/* Zero the per-client statistics areas in X/T/U storms */
3228 for_each_queue(bp, i) {
3229 u8 cl_id = bp->fp[i].cl_id;
3231 /* reset xstorm per client statistics */
3232 offset = BAR_XSTRORM_INTMEM +
3233 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3235 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3236 REG_WR(bp, offset + j*4, 0);
3238 /* reset tstorm per client statistics */
3239 offset = BAR_TSTRORM_INTMEM +
3240 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3242 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3243 REG_WR(bp, offset + j*4, 0);
3245 /* reset ustorm per client statistics */
3246 offset = BAR_USTRORM_INTMEM +
3247 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3249 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3250 REG_WR(bp, offset + j*4, 0);
3253 /* Init statistics related context */
3254 stats_flags.collect_eth = 1;
/* Publish the stats collection flags to all four storms (two words each) */
3256 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3257 ((u32 *)&stats_flags)[0]);
3258 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3259 ((u32 *)&stats_flags)[1]);
3261 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3262 ((u32 *)&stats_flags)[0]);
3263 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3264 ((u32 *)&stats_flags)[1]);
3266 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3267 ((u32 *)&stats_flags)[0]);
3268 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3269 ((u32 *)&stats_flags)[1]);
3271 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3272 ((u32 *)&stats_flags)[0]);
3273 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3274 ((u32 *)&stats_flags)[1]);
/* Point the storms at the host fw_stats DMA buffer (low/high words) */
3276 REG_WR(bp, BAR_XSTRORM_INTMEM +
3277 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3278 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3279 REG_WR(bp, BAR_XSTRORM_INTMEM +
3280 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3281 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3283 REG_WR(bp, BAR_TSTRORM_INTMEM +
3284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3285 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3286 REG_WR(bp, BAR_TSTRORM_INTMEM +
3287 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3288 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3290 REG_WR(bp, BAR_USTRORM_INTMEM +
3291 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3292 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
3294 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3295 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
/* E1H: publish the function mode to all storms plus the E1HOV value */
3297 if (CHIP_IS_E1H(bp)) {
3298 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3300 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3302 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3304 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3307 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3311 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3312 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3313 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3314 for_each_queue(bp, i) {
3315 struct bnx2x_fastpath *fp = &bp->fp[i];
/* CQE ring base for this client (low/high) */
3317 REG_WR(bp, BAR_USTRORM_INTMEM +
3318 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3319 U64_LO(fp->rx_comp_mapping));
3320 REG_WR(bp, BAR_USTRORM_INTMEM +
3321 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3322 U64_HI(fp->rx_comp_mapping));
/* Next CQE page = base + one BCM page */
3325 REG_WR(bp, BAR_USTRORM_INTMEM +
3326 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3327 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3328 REG_WR(bp, BAR_USTRORM_INTMEM +
3329 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3330 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3332 REG_WR16(bp, BAR_USTRORM_INTMEM +
3333 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3337 /* dropless flow control */
3338 if (CHIP_IS_E1H(bp)) {
3339 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
/* Low/high watermarks for BD and CQE rings; SGE thresholds only
 * apply when TPA is enabled on the queue */
3341 rx_pause.bd_thr_low = 250;
3342 rx_pause.cqe_thr_low = 250;
3344 rx_pause.sge_thr_low = 0;
3345 rx_pause.bd_thr_high = 350;
3346 rx_pause.cqe_thr_high = 350;
3347 rx_pause.sge_thr_high = 0;
3349 for_each_queue(bp, i) {
3350 struct bnx2x_fastpath *fp = &bp->fp[i];
3352 if (!fp->disable_tpa) {
3353 rx_pause.sge_thr_low = 150;
3354 rx_pause.sge_thr_high = 250;
3358 offset = BAR_USTRORM_INTMEM +
3359 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3362 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3364 REG_WR(bp, offset + j*4,
3365 ((u32 *)&rx_pause)[j]);
3369 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3371 /* Init rate shaping and fairness contexts */
3375 /* During init there is no active link
3376 Until link is up, set link rate to 10Gbps */
3377 bp->link_vars.line_speed = SPEED_10000;
3378 bnx2x_init_port_minmax(bp);
3382 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3383 bnx2x_calc_vn_weight_sum(bp);
/* Program min-max for every virtual NIC on this port */
3385 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3386 bnx2x_init_vn_minmax(bp, 2*vn + port);
3388 /* Enable rate shaping and fairness */
3389 bp->cmng.flags.cmng_enables |=
3390 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3393 /* rate shaping and fairness are disabled */
3395 "single function mode minmax will be disabled\n");
3399 /* Store cmng structures to internal memory */
3401 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3402 REG_WR(bp, BAR_XSTRORM_INTMEM +
3403 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3404 ((u32 *)(&bp->cmng))[i]);
/*
 * bnx2x_init_internal - dispatch internal-memory init by MCP load level.
 * COMMON implies PORT implies FUNCTION (fallthrough through the cases);
 * an unknown code is logged as an error.
 */
3407 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3409 switch (load_code) {
3410 case FW_MSG_CODE_DRV_LOAD_COMMON:
3411 bnx2x_init_internal_common(bp);
/* fall through to port-level init */
3414 case FW_MSG_CODE_DRV_LOAD_PORT:
3415 bnx2x_init_internal_port(bp);
/* fall through to function-level init */
3418 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3419 bnx2x_init_internal_func(bp);
3423 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
/*
 * bnx2x_nic_init - top-level NIC initialization after firmware load:
 * assign client/SB ids and program a status block for every fast-path
 * queue, set up the default SB, coalescing, Rx/Tx/SP rings, contexts,
 * internal memories and the RSS indirection table, init statistics,
 * then enable interrupts and replay a possible pending SPIO5 attention.
 *
 * @load_code: MCP load response, forwarded to bnx2x_init_internal().
 *
 * NOTE(review): a few lines are missing here (the condition selecting
 * fp->sb_id = cl_id + 1 vs cl_id, and the barrier before interrupt
 * enable); comments describe the visible statements.
 */
3428 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3432 for_each_queue(bp, i) {
3433 struct bnx2x_fastpath *fp = &bp->fp[i];
3436 fp->state = BNX2X_FP_STATE_CLOSED;
/* client ids are consecutive starting at this function's leading id */
3438 fp->cl_id = BP_L_ID(bp) + i;
3440 fp->sb_id = fp->cl_id + 1;
3442 fp->sb_id = fp->cl_id;
3445 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3446 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3447 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3449 bnx2x_update_fpsb_idx(fp);
3452 /* ensure status block indices were read */
3456 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3458 bnx2x_update_dsb_idx(bp);
3459 bnx2x_update_coalesce(bp);
3460 bnx2x_init_rx_rings(bp);
3461 bnx2x_init_tx_ring(bp);
3462 bnx2x_init_sp_ring(bp);
3463 bnx2x_init_context(bp);
3464 bnx2x_init_internal(bp, load_code);
3465 bnx2x_init_ind_table(bp);
3466 bnx2x_stats_init(bp);
3468 /* At this point, we are ready for interrupts */
3469 atomic_set(&bp->intr_sem, 0);
3471 /* flush all before enabling interrupts */
3475 bnx2x_int_enable(bp);
3477 /* Check for SPIO5 */
3478 bnx2x_attn_int_deasserted0(bp,
3479 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3480 AEU_INPUTS_ATTN_BITS_SPIO5);
3483 /* end of nic init */
3486 * gzip service functions
/*
 * bnx2x_gunzip_init - allocate the firmware decompression resources:
 * a DMA-coherent output buffer (FW_BUF_SIZE), the zlib stream object
 * and its inflate workspace. On any allocation failure the already
 * acquired resources are released and an error is logged.
 *
 * NOTE(review): the goto labels/return statements of the error unwind
 * path are on lines missing from this extract.
 */
3489 static int bnx2x_gunzip_init(struct bnx2x *bp)
3491 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3492 &bp->gunzip_mapping, GFP_KERNEL);
3493 if (bp->gunzip_buf == NULL)
3496 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3497 if (bp->strm == NULL)
3500 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3502 if (bp->strm->workspace == NULL)
/* error unwind: release the DMA buffer and report the failure */
3512 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3513 bp->gunzip_mapping);
3514 bp->gunzip_buf = NULL;
3517 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3518 " un-compression\n");
/*
 * bnx2x_gunzip_end - release decompression resources in reverse order of
 * bnx2x_gunzip_init: workspace, stream, then the DMA buffer (nulled so a
 * double call is harmless).
 */
3522 static void bnx2x_gunzip_end(struct bnx2x *bp)
3524 kfree(bp->strm->workspace);
3529 if (bp->gunzip_buf) {
3530 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3531 bp->gunzip_mapping);
3532 bp->gunzip_buf = NULL;
/*
 * bnx2x_gunzip - inflate a gzip-wrapped firmware blob into
 * bp->gunzip_buf: validate the gzip magic/method, skip the optional
 * FNAME field, then run raw-deflate inflate (-MAX_WBITS) in one
 * Z_FINISH pass. On success bp->gunzip_outlen holds the output length
 * in 32-bit words.
 *
 * @zbuf: gzip data (must start with the 0x1f 0x8b header).
 * @len:  length of @zbuf in bytes.
 *
 * NOTE(review): the declaration of n, the initial header-skip setup and
 * the failure return paths are on lines missing from this extract.
 */
3536 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3540 /* check gzip header */
3541 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3542 BNX2X_ERR("Bad gzip header\n");
/* skip the NUL-terminated original-filename field when present */
3550 if (zbuf[3] & FNAME)
3551 while ((zbuf[n++] != 0) && (n < len));
3553 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3554 bp->strm->avail_in = len - n;
3555 bp->strm->next_out = bp->gunzip_buf;
3556 bp->strm->avail_out = FW_BUF_SIZE;
/* raw deflate stream (negative window bits = no zlib header) */
3558 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3562 rc = zlib_inflate(bp->strm, Z_FINISH);
3563 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3564 netdev_err(bp->dev, "Firmware decompression error: %s\n",
/* output length in bytes; firmware images must be 4-byte aligned */
3567 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3568 if (bp->gunzip_outlen & 0x3)
3569 netdev_err(bp->dev, "Firmware decompression error:"
3570 " gunzip_outlen (%d) not aligned\n",
3572 bp->gunzip_outlen >>= 2;
3574 zlib_inflateEnd(bp->strm);
3576 if (rc == Z_STREAM_END)
3582 /* nic load/unload */
3585 * General service functions
3588 /* send a NIG loopback debug packet */
/*
 * bnx2x_lb_pckt - inject a minimal NIG loopback debug packet: one SOP
 * write carrying dummy Ethernet addresses, then one EOP write with a
 * non-IP protocol word. Used by bnx2x_int_mem_test().
 */
3589 static void bnx2x_lb_pckt(struct bnx2x *bp)
3593 /* Ethernet source and destination addresses */
3594 wb_write[0] = 0x55555555;
3595 wb_write[1] = 0x55555555;
3596 wb_write[2] = 0x20; /* SOP */
3597 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3599 /* NON-IP protocol */
3600 wb_write[0] = 0x09000000;
3601 wb_write[1] = 0x55555555;
3602 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3603 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3606 /* some of the internal memories
3607 * are not directly readable from the driver
3608 * to test them we send debug packets
/*
 * bnx2x_int_mem_test - self-test of internal memories that are not
 * directly readable: with the parser's neighbor blocks disabled, send
 * loopback debug packets and poll the NIG/PRS packet counters to verify
 * they flow through; part 1 sends one packet, part 2 sends ten, then the
 * CFC-search credit is restored and BRB/PRS/NIG are reset and
 * re-initialized and all inputs re-enabled.
 *
 * Returns 0 on success (per visible "done" path); timeouts and a
 * non-empty NIG EOP FIFO are reported via BNX2X_ERR.
 *
 * NOTE(review): loop headers around the polling sections, several local
 * declarations (val, count, factor, i) and the early returns are on
 * lines missing from this extract; "factor" scales timeouts for
 * FPGA/emulation platforms per the visible CHIP_REV checks.
 */
3610 static int bnx2x_int_mem_test(struct bnx2x *bp)
3616 if (CHIP_REV_IS_FPGA(bp))
3618 else if (CHIP_REV_IS_EMUL(bp))
3623 DP(NETIF_MSG_HW, "start part1\n");
3625 /* Disable inputs of parser neighbor blocks */
3626 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3627 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3628 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3629 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3631 /* Write 0 to parser credits for CFC search request */
3632 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3634 /* send Ethernet packet */
3637 /* TODO do i reset NIG statistic? */
3638 /* Wait until NIG register shows 1 packet of size 0x10 */
3639 count = 1000 * factor;
/* poll the NIG byte counter via DMAE */
3642 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3643 val = *bnx2x_sp(bp, wb_data[0]);
3651 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3655 /* Wait until PRS register shows 1 packet */
3656 count = 1000 * factor;
3658 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3666 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3670 /* Reset and init BRB, PRS */
3671 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3675 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3676 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3678 DP(NETIF_MSG_HW, "part2\n");
3680 /* Disable inputs of parser neighbor blocks */
3681 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3682 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3683 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3684 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3686 /* Write 0 to parser credits for CFC search request */
3687 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3689 /* send 10 Ethernet packets */
3690 for (i = 0; i < 10; i++)
3693 /* Wait until NIG register shows 10 + 1
3694 packets of size 11*0x10 = 0xb0 */
3695 count = 1000 * factor;
3698 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3699 val = *bnx2x_sp(bp, wb_data[0]);
3707 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3711 /* Wait until PRS register shows 2 packets */
3712 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3714 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3716 /* Write 1 to parser credits for CFC search request */
3717 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3719 /* Wait until PRS register shows 3 packets */
3720 msleep(10 * factor);
3721 /* Wait until NIG register shows 1 packet of size 0x10 */
3722 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3724 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3726 /* clear NIG EOP FIFO */
3727 for (i = 0; i < 11; i++)
3728 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3729 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3731 BNX2X_ERR("clear of NIG failed\n");
3735 /* Reset and init BRB, PRS, NIG */
3736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3740 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3741 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3744 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3747 /* Enable inputs of parser neighbor blocks */
3748 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3749 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3750 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3751 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3753 DP(NETIF_MSG_HW, "done\n");
/*
 * enable_blocks_attention - unmask HW attention interrupt sources.
 * @bp: driver handle
 *
 * Writes 0 (all sources unmasked) to the per-block *_INT_MASK
 * registers so those blocks can raise attention interrupts.  The
 * SEM and MISC masks are deliberately left commented out.  PXP2
 * keeps a non-zero mask (FPGA vs. ASIC values differ) and PBF
 * masks bits 3-4.  NOTE(review): interior lines are elided in this
 * listing; the line between the FPGA and ASIC PXP2 writes is the
 * missing 'else'.
 */
3758 static void enable_blocks_attention(struct bnx2x *bp)
3760 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3761 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3762 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3763 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3764 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3765 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3766 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3767 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3768 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3769 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3770 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3771 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3772 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3773 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3774 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3775 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3776 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3777 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3778 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3779 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3780 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3781 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3782 if (CHIP_REV_IS_FPGA(bp))
3783 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
/* ASIC value (taken on the elided 'else' arm) */
3785 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3786 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3787 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3788 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3789 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3790 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3791 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3792 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3793 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3794 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/*
 * bnx2x_parity_mask[] - per-block parity attention mask table.
 *
 * Each entry pairs a *_PRTY_MASK register address with the value to
 * program into it (0 = report all parity events; set bits mask off
 * the sources noted in the inline comments).  The struct field
 * declarations (addr, mask) are on lines elided from this listing.
 * Consumed by enable_blocks_parity().
 */
3797 static const struct {
3800 } bnx2x_parity_mask[] = {
3801 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3802 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3803 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3804 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3805 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3806 {QM_REG_QM_PRTY_MASK, 0x0},
3807 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3808 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3809 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3810 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3811 {CDU_REG_CDU_PRTY_MASK, 0x0},
3812 {CFC_REG_CFC_PRTY_MASK, 0x0},
3813 {DBG_REG_DBG_PRTY_MASK, 0x0},
3814 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3815 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3816 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3817 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3818 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3819 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3820 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3821 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3822 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3823 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3824 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3825 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3826 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3827 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3828 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3831 static void enable_blocks_parity(struct bnx2x *bp)
3833 int i, mask_arr_len =
3834 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3836 for (i = 0; i < mask_arr_len; i++)
3837 REG_WR(bp, bnx2x_parity_mask[i].addr,
3838 bnx2x_parity_mask[i].mask);
/*
 * bnx2x_reset_common - put the chip-common blocks into reset via the
 * MISC RESET_REG_{1,2} "clear" registers.  NOTE(review): the value
 * operand of the first REG_WR is on a continuation line elided from
 * this listing.
 */
3842 static void bnx2x_reset_common(struct bnx2x *bp)
3845 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3847 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
/*
 * bnx2x_init_pxp - derive PXP arbiter orders from PCIe config space.
 * @bp: driver handle
 *
 * Reads the PCIe Device Control register and extracts the max payload
 * size (w_order, DEVCTL bits >> 5) and max read request size (r_order,
 * DEVCTL bits >> 12), then programs the PXP arbiter accordingly.
 * The 'devctl' declaration and the mrrs-override branch are on lines
 * elided from this listing.
 */
3850 static void bnx2x_init_pxp(struct bnx2x *bp)
3853 int r_order, w_order;
3855 pci_read_config_word(bp->pdev,
3856 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3857 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3858 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3860 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3862 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3866 bnx2x_init_pxp_arb(bp, r_order, w_order);
/*
 * bnx2x_setup_fan_failure_detection - arm SPIO 5 fan-failure attention.
 * @bp: driver handle
 *
 * Reads the fan-failure policy from shared HW config: if globally
 * enabled, or enabled per-PHY-type (checked for each port via
 * bnx2x_fan_failure_det_req), configure SPIO 5 as an input in
 * active-low mode and enable its event so a fan failure raises an
 * IGU attention.  Returns early (elided line) when not required.
 */
3869 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3879 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3880 SHARED_HW_CFG_FAN_FAILURE_MASK;
3882 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3886 * The fan failure mechanism is usually related to the PHY type since
3887 * the power consumption of the board is affected by the PHY. Currently,
3888 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3890 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3891 for (port = PORT_0; port < PORT_MAX; port++) {
3893 bnx2x_fan_failure_det_req(
3895 bp->common.shmem_base,
3896 bp->common.shmem2_base,
3900 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3902 if (is_required == 0)
3905 /* Fan failure is indicated by SPIO 5 */
3906 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3907 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3909 /* set to active low mode */
3910 val = REG_RD(bp, MISC_REG_SPIO_INT);
3911 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3912 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3913 REG_WR(bp, MISC_REG_SPIO_INT, val);
3915 /* enable interrupt to signal the IGU */
3916 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3917 val |= (1 << MISC_REGISTERS_SPIO_5);
3918 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
/*
 * bnx2x_init_common - one-time chip-common HW initialization.
 * @bp: driver handle
 *
 * Runs once per chip (FW_MSG_CODE_DRV_LOAD_COMMON): resets the common
 * blocks, then brings up every HW block at COMMON_STAGE in dependency
 * order (MISC, PXP/PXP2, DMAE, the four CM blocks, QM, timers,
 * doorbell queue, BRB/PRS, SDM/SEM blocks, PBF, searcher, CDU, CFC,
 * HC/AEU, NIG), seeds the searcher RSS keys, runs the E1 internal
 * memory self test on first power-up, arms block attentions/parity,
 * and initializes the common PHY via the MCP.  Returns 0 on success
 * (return paths are on elided lines).
 *
 * NOTE(review): many interior lines (braces, else-arms, poll loops,
 * #ifdef BCM_CNIC arms) are elided from this listing; comments below
 * describe only the visible statements.
 */
3921 static int bnx2x_init_common(struct bnx2x *bp)
3928 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
3930 bnx2x_reset_common(bp);
3931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3932 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3934 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3935 if (CHIP_IS_E1H(bp))
3936 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3938 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3940 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3942 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3943 if (CHIP_IS_E1(bp)) {
3944 /* enable HW interrupt from PXP on USDM overflow
3945 bit 16 on INT_MASK_0 */
3946 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3949 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
/* big-endian master settings for the RQ clients (visible arm) */
3953 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3954 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3955 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3956 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3957 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3958 /* make sure this value is 0 */
3959 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3961 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3962 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3963 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3964 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3965 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3968 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3970 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3971 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3972 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3975 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3976 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3978 /* let the HW do it's magic ... */
3980 /* finish PXP init */
3981 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3983 BNX2X_ERR("PXP2 CFG failed\n");
3986 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3988 BNX2X_ERR("PXP2 RD_INIT failed\n");
3992 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3993 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3995 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3997 /* clean the DMAE memory */
3999 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
4001 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4002 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4003 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4004 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
4006 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4007 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4008 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4009 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4011 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
/* QM base-address / pointer-table setup for 64 queues */
4016 for (i = 0; i < 64; i++) {
4017 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
4018 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4020 if (CHIP_IS_E1H(bp)) {
4021 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4022 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4027 /* soft reset pulse */
4028 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4029 REG_WR(bp, QM_REG_SOFT_RESET, 0);
4032 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4035 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4036 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4037 if (!CHIP_REV_IS_SLOW(bp)) {
4038 /* enable hw interrupt from doorbell Q */
4039 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4042 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4044 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4047 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4049 if (CHIP_IS_E1H(bp))
4050 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4052 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4054 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4055 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
/* zero the internal memory of all four storm processors */
4057 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4058 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4059 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4060 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4062 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4063 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4064 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4065 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4070 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4073 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4074 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4075 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
/* seed the searcher RSS key registers with random values */
4077 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4078 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4079 REG_WR(bp, i, random32());
4080 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4082 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4083 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4084 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4085 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4086 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4087 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4088 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4089 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4090 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4091 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4093 REG_WR(bp, SRC_REG_SOFT_RST, 0);
4095 if (sizeof(union cdu_context) != 1024)
4096 /* we currently assume that a context is 1024 bytes */
4097 dev_alert(&bp->pdev->dev, "please adjust the size "
4098 "of cdu_context(%ld)\n",
4099 (long)sizeof(union cdu_context));
4101 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4102 val = (4 << 24) + (0 << 12) + 1024;
4103 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4105 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4106 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4107 /* enable context validation interrupt from CFC */
4108 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4110 /* set the thresholds to prevent CFC/CDU race */
4111 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4113 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4114 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4116 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4117 /* Reset PCIE errors for debug */
4118 REG_WR(bp, 0x2814, 0xffffffff);
4119 REG_WR(bp, 0x3820, 0xffffffff);
4121 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4122 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4123 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4124 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4126 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4127 if (CHIP_IS_E1H(bp)) {
4128 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4129 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4132 if (CHIP_REV_IS_SLOW(bp))
4135 /* finish CFC init */
4136 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4138 BNX2X_ERR("CFC LL_INIT failed\n");
4141 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4143 BNX2X_ERR("CFC AC_INIT failed\n");
4146 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4148 BNX2X_ERR("CFC CAM_INIT failed\n");
4151 REG_WR(bp, CFC_REG_DEBUG0, 0);
4153 /* read NIG statistic
4154 to see if this is our first up since powerup */
4155 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4156 val = *bnx2x_sp(bp, wb_data[0]);
4158 /* do internal memory self test */
4159 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4160 BNX2X_ERR("internal mem self test failed\n");
4164 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4165 bp->common.shmem_base,
4166 bp->common.shmem2_base);
4168 bnx2x_setup_fan_failure_detection(bp);
4170 /* clear PXP2 attentions */
4171 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4173 enable_blocks_attention(bp);
4174 if (CHIP_PARITY_SUPPORTED(bp))
4175 enable_blocks_parity(bp);
4177 if (!BP_NOMCP(bp)) {
4178 bnx2x_acquire_phy_lock(bp);
4179 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4180 bp->common.shmem2_base);
4181 bnx2x_release_phy_lock(bp);
4183 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
/*
 * bnx2x_init_port - per-port HW initialization.
 * @bp: driver handle
 *
 * Runs once per port (FW_MSG_CODE_DRV_LOAD_PORT): initializes every
 * HW block at this port's PORTx_STAGE, computes BRB pause thresholds
 * from the MTU and ONE_PORT_FLAG, configures PBF for pause-less
 * operation at MTU 9000, programs the AEU attention mask for SF/MF
 * mode, sets up NIG (XGXS/SerDes select, e1hov filtering, LLFC off /
 * PAUSE on), arms the SPIO5 fan-failure attention when required, and
 * finally resets the link.  Return statement is on an elided line.
 *
 * NOTE(review): interior lines (braces, else-arms, some REG_WRs) are
 * elided from this listing.
 */
4188 static int bnx2x_init_port(struct bnx2x *bp)
4190 int port = BP_PORT(bp);
4191 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4195 DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4197 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4199 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4200 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4202 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4203 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4204 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4205 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4208 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4210 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4211 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4212 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4215 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4217 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4218 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4219 /* no pause for emulation and FPGA */
/* BRB pause thresholds, in 256-byte units, chosen by MTU */
4224 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4225 else if (bp->dev->mtu > 4096) {
4226 if (bp->flags & ONE_PORT_FLAG)
4230 /* (24*1024 + val*4)/256 */
4231 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4234 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4235 high = low + 56; /* 14*1024/256 */
4237 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4238 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4241 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4243 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4244 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4245 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4246 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4248 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4249 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4250 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4251 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4253 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4254 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4256 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4258 /* configure PBF to work without PAUSE mtu 9000 */
4259 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4261 /* update threshold */
4262 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4263 /* update init credit */
4264 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
/* pulse the PBF init bit to latch the new credit */
4267 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4269 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4272 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4274 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4275 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4277 if (CHIP_IS_E1(bp)) {
4278 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4279 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4281 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4283 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4284 /* init aeu_mask_attn_func_0/1:
4285 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4286 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4287 * bits 4-7 are used for "per vn group attention" */
4288 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4289 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4291 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4292 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4293 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4294 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4295 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4297 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4299 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4301 if (CHIP_IS_E1H(bp)) {
4302 /* 0x2 disable e1hov, 0x1 enable */
4303 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4304 (IS_E1HMF(bp) ? 0x1 : 0x2));
4307 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4308 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4309 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4313 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4314 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4315 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4316 bp->common.shmem_base,
4317 bp->common.shmem2_base);
4318 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4319 bp->common.shmem2_base, port)) {
4320 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4321 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4322 val = REG_RD(bp, reg_addr);
4323 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4324 REG_WR(bp, reg_addr, val);
4326 bnx2x__link_reset(bp);
/*
 * ILT (Internal Lookup Table) layout helpers: each function owns
 * ILT_PER_FUNC consecutive lines starting at FUNC_ILT_BASE(func).
 */
4331 #define ILT_PER_FUNC	(768/2)
4332 #define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
4333 /* the phys address is shifted right 12 bits and has a
4334 1=valid bit added to the 53rd bit
4335 then since this is a wide register(TM)
4336 we split it into two 32 bit writes
4338 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4339 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4340 #define PXP_ONE_ILT(x) (((x) << 10) | x)
4341 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
/* NOTE(review): the two CNIC_ILT_LINES definitions below are the two
 * arms of an #ifdef BCM_CNIC whose directives are elided here. */
4344 #define CNIC_ILT_LINES 127
4345 #define CNIC_CTX_PER_ILT 16
4347 #define CNIC_ILT_LINES 0
/*
 * bnx2x_ilt_wr - program one ILT line with a DMA address.
 * @bp:    driver handle
 * @index: ILT line index
 * @addr:  physical address to install (split into two 32-bit halves
 *         by ONCHIP_ADDR1/ONCHIP_ADDR2, which also set the valid bit)
 *
 * The on-chip address-table register base differs between E1H and E1.
 * The visible original had lost the 'else', so @reg was overwritten
 * unconditionally and the E1H offset was never used - restore the
 * if/else so each chip writes its own register.
 */
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
/*
 * bnx2x_init_func - per-function HW initialization.
 * @bp: driver handle
 *
 * Runs once per PCI function (FW_MSG_CODE_DRV_LOAD_FUNCTION): enables
 * MSI reconfigure in the HC, programs this function's ILT lines
 * (context, and - on elided #ifdef BCM_CNIC arms - timers, QM and
 * searcher tables plus the T2 table pointers), re-runs the CM/SEM
 * block init at FUNC stage for E1H with the e1hov VLAN filter, clears
 * the per-function AEU general attention, initializes the HC, clears
 * PCIe error registers and probes the PHY.  Return is on an elided
 * line.
 */
4362 static int bnx2x_init_func(struct bnx2x *bp)
4364 int port = BP_PORT(bp);
4365 int func = BP_FUNC(bp);
4369 DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4371 /* set MSI reconfigure capability */
4372 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4373 val = REG_RD(bp, addr);
4374 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4375 REG_WR(bp, addr, val);
4377 i = FUNC_ILT_BASE(func);
4379 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4380 if (CHIP_IS_E1H(bp)) {
4381 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4382 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4384 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4385 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
/* NOTE(review): the blocks below are CNIC-only (#ifdef arms elided) */
4388 i += 1 + CNIC_ILT_LINES;
4389 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4391 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4393 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4394 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4398 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4400 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4402 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4403 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4407 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4409 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4411 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4412 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4415 /* tell the searcher where the T2 table is */
4416 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4418 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4419 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4421 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4422 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4423 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4425 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4428 if (CHIP_IS_E1H(bp)) {
4429 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4430 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4431 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4432 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4433 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4434 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4435 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4436 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4437 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4439 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4440 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4443 /* HC init per function */
4444 if (CHIP_IS_E1H(bp)) {
4445 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4447 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4448 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4450 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4452 /* Reset PCIE errors for debug */
4453 REG_WR(bp, 0x2114, 0xffffffff);
4454 REG_WR(bp, 0x2120, 0xffffffff);
4455 bnx2x_phy_probe(&bp->link_params);
/*
 * bnx2x_init_hw - top-level HW init dispatcher.
 * @bp:        driver handle
 * @load_code: MCP load response selecting how much init this
 *             function is responsible for
 *
 * COMMON implies PORT, which implies FUNCTION (the switch falls
 * through on the elided lines).  Also latches the FW driver-pulse
 * sequence from shmem and zeroes the default and per-queue status
 * blocks before releasing the gunzip buffer.  Returns 0 or a
 * negative error (error paths elided).
 */
4459 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4463 DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4464 BP_FUNC(bp), load_code);
4467 mutex_init(&bp->dmae_mutex);
4468 rc = bnx2x_gunzip_init(bp);
4472 switch (load_code) {
4473 case FW_MSG_CODE_DRV_LOAD_COMMON:
4474 rc = bnx2x_init_common(bp);
4479 case FW_MSG_CODE_DRV_LOAD_PORT:
4481 rc = bnx2x_init_port(bp);
4486 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4488 rc = bnx2x_init_func(bp);
4494 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4498 if (!BP_NOMCP(bp)) {
4499 int func = BP_FUNC(bp);
4501 bp->fw_drv_pulse_wr_seq =
4502 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4503 DRV_PULSE_SEQ_MASK);
4504 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4507 /* this needs to be done before gunzip end */
4508 bnx2x_zero_def_sb(bp);
4509 for_each_queue(bp, i)
4510 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4512 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4516 bnx2x_gunzip_end(bp);
/*
 * bnx2x_free_mem - release all driver memory: the exact mirror of
 * bnx2x_alloc_mem().
 * @bp: driver handle
 *
 * BNX2X_PCI_FREE releases a dma_alloc_coherent() region (and, on the
 * elided lines, NULLs the pointer); BNX2X_FREE releases a vmalloc()
 * region.  Frees per-queue status blocks, rx/tx rings, then the
 * default status block, slowpath, CNIC tables (elided #ifdef arm)
 * and the slowpath SPQ ring.
 */
4521 void bnx2x_free_mem(struct bnx2x *bp)
4524 #define BNX2X_PCI_FREE(x, y, size) \
4527 dma_free_coherent(&bp->pdev->dev, size, x, y); \
4533 #define BNX2X_FREE(x) \
4545 for_each_queue(bp, i) {
4548 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4549 bnx2x_fp(bp, i, status_blk_mapping),
4550 sizeof(struct host_status_block));
4553 for_each_queue(bp, i) {
4555 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4556 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4557 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4558 bnx2x_fp(bp, i, rx_desc_mapping),
4559 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4561 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4562 bnx2x_fp(bp, i, rx_comp_mapping),
4563 sizeof(struct eth_fast_path_rx_cqe) *
4567 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4568 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4569 bnx2x_fp(bp, i, rx_sge_mapping),
4570 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4573 for_each_queue(bp, i) {
4575 /* fastpath tx rings: tx_buf tx_desc */
4576 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4577 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4578 bnx2x_fp(bp, i, tx_desc_mapping),
4579 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4581 /* end of fastpath */
4583 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4584 sizeof(struct host_def_status_block));
4586 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4587 sizeof(struct bnx2x_slowpath));
4590 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4591 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4592 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4593 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4594 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4595 sizeof(struct host_status_block));
4597 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4599 #undef BNX2X_PCI_FREE
/*
 * bnx2x_alloc_mem - allocate all driver memory.
 * @bp: driver handle
 *
 * BNX2X_PCI_ALLOC wraps dma_alloc_coherent() + memset, BNX2X_ALLOC
 * wraps vmalloc() + memset; both jump to alloc_mem_err (elided) on
 * failure, which frees everything via bnx2x_free_mem().  Allocates
 * per-queue status blocks, rx/tx rings, the default status block,
 * slowpath memory, CNIC tables (elided #ifdef arm) and the SPQ ring.
 * The T2 table is chained so entry i's last 8 bytes point at the
 * physical address of entry i+1.  Returns 0 on success, -ENOMEM on
 * failure (return paths elided).
 */
4603 int bnx2x_alloc_mem(struct bnx2x *bp)
4606 #define BNX2X_PCI_ALLOC(x, y, size) \
4608 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4610 goto alloc_mem_err; \
4611 memset(x, 0, size); \
4614 #define BNX2X_ALLOC(x, size) \
4616 x = vmalloc(size); \
4618 goto alloc_mem_err; \
4619 memset(x, 0, size); \
4626 for_each_queue(bp, i) {
4627 bnx2x_fp(bp, i, bp) = bp;
4630 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4631 &bnx2x_fp(bp, i, status_blk_mapping),
4632 sizeof(struct host_status_block));
4635 for_each_queue(bp, i) {
4637 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4638 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4639 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4640 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4641 &bnx2x_fp(bp, i, rx_desc_mapping),
4642 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4644 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4645 &bnx2x_fp(bp, i, rx_comp_mapping),
4646 sizeof(struct eth_fast_path_rx_cqe) *
4650 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4651 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4652 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4653 &bnx2x_fp(bp, i, rx_sge_mapping),
4654 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4657 for_each_queue(bp, i) {
4659 /* fastpath tx rings: tx_buf tx_desc */
4660 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4661 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4662 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4663 &bnx2x_fp(bp, i, tx_desc_mapping),
4664 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4666 /* end of fastpath */
4668 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4669 sizeof(struct host_def_status_block));
4671 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4672 sizeof(struct bnx2x_slowpath));
4675 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4677 /* allocate searcher T2 table
4678 we allocate 1/4 of alloc num for T2
4679 (which is not entered into the ILT) */
4680 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4682 /* Initialize T2 (for 1024 connections) */
4683 for (i = 0; i < 16*1024; i += 64)
4684 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4686 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4687 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4689 /* QM queues (128*MAX_CONN) */
4690 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4692 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4693 sizeof(struct host_status_block));
4696 /* Slow path ring */
4697 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4705 #undef BNX2X_PCI_ALLOC
4711 * Init service functions
4715 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4717 * @param bp driver descriptor
4718 * @param set set or clear an entry (1 or 0)
4719 * @param mac pointer to a buffer containing a MAC
4720 * @param cl_bit_vec bit vector of clients to register a MAC for
4721 * @param cam_offset offset in a CAM to use
4722 * @param with_bcast set broadcast MAC as well
/*
 * bnx2x_set_mac_addr_e1_gen - program an E1 CAM entry via a SET_MAC
 * ramrod.
 * @bp:         driver handle
 * @set:        1 to install the entry, 0 to invalidate it
 * @mac:        6-byte MAC address
 * @cl_bit_vec: bit vector of client IDs the MAC applies to
 * @cam_offset: CAM offset to program
 * @with_bcast: also program a broadcast (ff:ff:ff:ff:ff:ff) entry
 *              in config_table[1]
 *
 * Fills the slowpath mac_configuration_cmd (entry 0 = the given MAC,
 * optional entry 1 = broadcast; the clear path invalidates via
 * CAM_INVALIDATE on the elided else-arms) and posts the ramrod.
 * Completion is awaited by the caller via bnx2x_wait_ramrod().
 */
4724 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4725 u32 cl_bit_vec, u8 cam_offset,
4728 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4729 int port = BP_PORT(bp);
4732 * unicasts 0-31:port0 32-63:port1
4733 * multicast 64-127:port0 128-191:port1
4735 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4736 config->hdr.offset = cam_offset;
4737 config->hdr.client_id = 0xff;
4738 config->hdr.reserved1 = 0;
4741 config->config_table[0].cam_entry.msb_mac_addr =
4742 swab16(*(u16 *)&mac[0]);
4743 config->config_table[0].cam_entry.middle_mac_addr =
4744 swab16(*(u16 *)&mac[2]);
4745 config->config_table[0].cam_entry.lsb_mac_addr =
4746 swab16(*(u16 *)&mac[4]);
4747 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4749 config->config_table[0].target_table_entry.flags = 0;
4751 CAM_INVALIDATE(config->config_table[0]);
4752 config->config_table[0].target_table_entry.clients_bit_vector =
4753 cpu_to_le32(cl_bit_vec);
4754 config->config_table[0].target_table_entry.vlan_id = 0;
4756 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4757 (set ? "setting" : "clearing"),
4758 config->config_table[0].cam_entry.msb_mac_addr,
4759 config->config_table[0].cam_entry.middle_mac_addr,
4760 config->config_table[0].cam_entry.lsb_mac_addr);
4764 config->config_table[1].cam_entry.msb_mac_addr =
4765 cpu_to_le16(0xffff);
4766 config->config_table[1].cam_entry.middle_mac_addr =
4767 cpu_to_le16(0xffff);
4768 config->config_table[1].cam_entry.lsb_mac_addr =
4769 cpu_to_le16(0xffff);
4770 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4772 config->config_table[1].target_table_entry.flags =
4773 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4775 CAM_INVALIDATE(config->config_table[1]);
4776 config->config_table[1].target_table_entry.clients_bit_vector =
4777 cpu_to_le32(cl_bit_vec);
4778 config->config_table[1].target_table_entry.vlan_id = 0;
4781 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4782 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4783 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4787 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4789 * @param bp driver descriptor
4790 * @param set set or clear an entry (1 or 0)
4791 * @param mac pointer to a buffer containing a MAC
4792 * @param cl_bit_vec bit vector of clients to register a MAC for
4793 * @param cam_offset offset in a CAM to use
/*
 * bnx2x_set_mac_addr_e1h_gen - program an E1H CAM entry via a SET_MAC
 * ramrod.
 * @bp:         driver handle
 * @set:        1 to install the entry, 0 to invalidate it
 * @mac:        6-byte MAC address
 * @cl_bit_vec: bit vector of client IDs the MAC applies to
 * @cam_offset: CAM offset to program
 *
 * E1H variant of bnx2x_set_mac_addr_e1_gen(): single-entry command
 * carrying the function's outer VLAN (e1hov); the clear path (elided
 * else-arm) sets the INVALIDATE action flag instead of the port.
 * Completion is awaited by the caller via bnx2x_wait_ramrod().
 */
4795 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4796 u32 cl_bit_vec, u8 cam_offset)
4798 struct mac_configuration_cmd_e1h *config =
4799 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4801 config->hdr.length = 1;
4802 config->hdr.offset = cam_offset;
4803 config->hdr.client_id = 0xff;
4804 config->hdr.reserved1 = 0;
4807 config->config_table[0].msb_mac_addr =
4808 swab16(*(u16 *)&mac[0]);
4809 config->config_table[0].middle_mac_addr =
4810 swab16(*(u16 *)&mac[2]);
4811 config->config_table[0].lsb_mac_addr =
4812 swab16(*(u16 *)&mac[4]);
4813 config->config_table[0].clients_bit_vector =
4814 cpu_to_le32(cl_bit_vec);
4815 config->config_table[0].vlan_id = 0;
4816 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4818 config->config_table[0].flags = BP_PORT(bp);
4820 config->config_table[0].flags =
4821 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4823 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
4824 (set ? "setting" : "clearing"),
4825 config->config_table[0].msb_mac_addr,
4826 config->config_table[0].middle_mac_addr,
4827 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4829 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4830 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4831 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/*
 * bnx2x_wait_ramrod - wait for a slowpath ramrod to complete.
 * @bp:      driver handle
 * @state:   value *state_p must reach for success
 * @idx:     queue index whose completion queue carries the reply
 *           (0 = default queue)
 * @state_p: state variable updated by bnx2x_sp_event()
 * @poll:    if set, actively poll the rx completion ring instead of
 *           only sleeping (used when interrupts are not serviced)
 *
 * Loops (loop header elided) checking *state_p after a memory
 * barrier; returns 0 on success or logs a timeout error and (on
 * elided lines) returns -EBUSY.
 */
4834 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4835 int *state_p, int poll)
4837 /* can take a while if any port is running */
4840 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4841 poll ? "polling" : "waiting", state, idx);
4846 bnx2x_rx_int(bp->fp, 10);
4847 /* if index is different from 0
4848 * the reply for some commands will
4849 * be on the non default queue
4852 bnx2x_rx_int(&bp->fp[idx], 10);
4855 mb(); /* state is changed by bnx2x_sp_event() */
4856 if (*state_p == state) {
4857 #ifdef BNX2X_STOP_ON_ERROR
4858 DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4870 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4871 poll ? "polling" : "waiting", state, idx);
4872 #ifdef BNX2X_STOP_ON_ERROR
/*
 * bnx2x_set_eth_mac_addr_e1h - set/clear the primary ETH MAC (E1H).
 * @bp:  driver handle
 * @set: 1 to install, 0 to clear
 *
 * CAM offset is the function number; client vector is this
 * function's leading client.  Waits for the ramrod to complete
 * (polling only on the clear path).
 */
4879 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4881 bp->set_mac_pending++;
4884 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4885 (1 << bp->fp->cl_id), BP_FUNC(bp));
4887 /* Wait for a completion */
4888 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
/*
 * bnx2x_set_eth_mac_addr_e1 - set/clear the primary ETH MAC (E1).
 * @bp:  driver handle
 * @set: 1 to install, 0 to clear
 *
 * CAM offset 0 for port 0, 32 for port 1 (per the E1 CAM layout);
 * the trailing with_bcast argument is on an elided line.  Waits for
 * the ramrod to complete (polling only on the clear path).
 */
4891 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4893 bp->set_mac_pending++;
4896 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4897 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4900 /* Wait for a completion */
4901 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4906  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4907  * MAC(s). This function will wait until the ramrod completion
4910  * @param bp driver handle
4911  * @param set set or clear the CAM entry
4913  * @return 0 if success, -ENODEV if ramrod doesn't return.
4915 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4917 	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4919 	bp->set_mac_pending++;
4922 	/* Send a SET_MAC ramrod */
/* E1: iSCSI MAC goes two entries past the per-port unicast base */
4924 		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4925 					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4928 		/* CAM allocation for E1H
4929 		 * unicasts: by func number
4930 		 * multicast: 20+FUNC*20, 20 each
4932 		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4933 					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4935 	/* Wait for a completion when setting */
4936 	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
/* Bring up the leading (default) connection: re-arm the IGU status
 * block, post a PORT_SETUP ramrod and wait until bp->state reaches
 * BNX2X_STATE_OPEN. Returns the bnx2x_wait_ramrod() result. */
4942 int bnx2x_setup_leading(struct bnx2x *bp)
4946 	/* reset IGU state */
4947 	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4950 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4952 	/* Wait for completion */
4953 	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/* Open a non-default (multi-queue) client connection 'index': re-arm
 * its IGU status block, post a CLIENT_SETUP ramrod and wait for the
 * fastpath state to become BNX2X_FP_STATE_OPEN. */
4958 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4960 	struct bnx2x_fastpath *fp = &bp->fp[index];
4962 	/* reset IGU state */
4963 	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4966 	fp->state = BNX2X_FP_STATE_OPENING;
4967 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4970 	/* Wait for completion */
4971 	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
/* Choose bp->num_queues for MSI-X operation based on the configured RSS
 * mode, capped by the HW queue limit (and by num_online_cpus() when no
 * explicit queue count was requested). */
4976 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4979 	switch (bp->multi_mode) {
4980 	case ETH_RSS_MODE_DISABLED:
4984 	case ETH_RSS_MODE_REGULAR:
4986 			bp->num_queues = min_t(u32, num_queues,
4987 						  BNX2X_MAX_QUEUES(bp));
4989 			bp->num_queues = min_t(u32, num_online_cpus(),
4990 						  BNX2X_MAX_QUEUES(bp));
/* Tear down non-default connection 'index' in two ramrod steps:
 * HALT (stop traffic on the client) then CFC_DEL (free the connection
 * context), waiting for each completion. Returns non-zero on timeout. */
5002 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5004 	struct bnx2x_fastpath *fp = &bp->fp[index];
5007 	/* halt the connection */
5008 	fp->state = BNX2X_FP_STATE_HALTING;
5009 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5011 	/* Wait for completion */
5012 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5014 	if (rc) /* timeout */
5017 	/* delete cfc entry */
5018 	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5020 	/* Wait for completion */
5021 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5026 static int bnx2x_stop_leading(struct bnx2x *bp)
5028 __le16 dsb_sp_prod_idx;
5029 /* if the other port is handling traffic,
5030 this can take a lot of time */
5036 /* Send HALT ramrod */
5037 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5038 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5040 /* Wait for completion */
5041 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5042 &(bp->fp[0].state), 1);
5043 if (rc) /* timeout */
5046 dsb_sp_prod_idx = *bp->dsb_sp_prod;
5048 /* Send PORT_DELETE ramrod */
5049 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5051 /* Wait for completion to arrive on default status block
5052 we are going to reset the chip anyway
5053 so there is not much to do if this times out
5055 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5057 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5058 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5059 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5060 #ifdef BNX2X_STOP_ON_ERROR
5068 rmb(); /* Refresh the dsb_sp_prod */
5070 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5071 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
/* Function-scope reset: mask HC attention edges, disable and drain the
 * timer scan (up to ~2s on slow parts), then clear this function's ILT
 * entries. */
5076 static void bnx2x_reset_func(struct bnx2x *bp)
5078 	int port = BP_PORT(bp);
5079 	int func = BP_FUNC(bp);
5083 	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5084 	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5087 	/* Disable Timer scan */
5088 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5090 	 * Wait for at least 10ms and up to 2 second for the timers scan to
5093 	for (i = 0; i < 200; i++) {
5095 		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
/* invalidate every ILT entry owned by this function */
5100 	base = FUNC_ILT_BASE(func);
5101 	for (i = base; i < base + ILT_PER_FUNC; i++)
5102 		bnx2x_ilt_wr(bp, i, 0);
5105 static void bnx2x_reset_port(struct bnx2x *bp)
5107 int port = BP_PORT(bp);
5110 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5112 /* Do not rcv packets to BRB */
5113 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5114 /* Do not direct rcv packets that are not for MCP to the BRB */
5115 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5116 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5119 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5122 /* Check for BRB port occupancy */
5123 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5125 DP(NETIF_MSG_IFDOWN,
5126 "BRB1 is not empty %d blocks are occupied\n", val);
5128 /* TODO: Close Doorbell port? */
/* Perform the reset scope negotiated with the MCP: COMMON resets port,
 * function and common blocks; PORT resets port and function; FUNCTION
 * resets only this function. Unknown codes are logged. */
5131 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5133 	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5134 	   BP_FUNC(bp), reset_code);
5136 	switch (reset_code) {
5137 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5138 		bnx2x_reset_port(bp);
5139 		bnx2x_reset_func(bp);
5140 		bnx2x_reset_common(bp);
5143 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5144 		bnx2x_reset_port(bp);
5145 		bnx2x_reset_func(bp);
5148 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5149 		bnx2x_reset_func(bp);
5153 		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
/* Graceful chip shutdown for unload: drain the TX queues, clear the
 * unicast/multicast CAM entries (or MC hash on E1H) and the iSCSI MAC,
 * optionally program WoL MAC filters in the EMAC, close every client
 * connection via ramrods, then negotiate the reset scope with the MCP
 * (or via the shared load counters when no MCP is present) and reset
 * the chip accordingly. */
5158 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5160 	int port = BP_PORT(bp);
5164 	/* Wait until tx fastpath tasks complete */
5165 	for_each_queue(bp, i) {
5166 		struct bnx2x_fastpath *fp = &bp->fp[i];
5169 		while (bnx2x_has_tx_work_unload(fp)) {
5173 				BNX2X_ERR("timeout waiting for queue[%d]\n",
5175 #ifdef BNX2X_STOP_ON_ERROR
5186 	/* Give HW time to discard old tx messages */
5189 	if (CHIP_IS_E1(bp)) {
5190 		struct mac_configuration_cmd *config =
5191 						bnx2x_sp(bp, mcast_config);
5193 		bnx2x_set_eth_mac_addr_e1(bp, 0);
/* invalidate all multicast CAM entries previously programmed */
5195 		for (i = 0; i < config->hdr.length; i++)
5196 			CAM_INVALIDATE(config->config_table[i]);
5198 		config->hdr.length = i;
5199 		if (CHIP_REV_IS_SLOW(bp))
5200 			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5202 			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5203 		config->hdr.client_id = bp->fp->cl_id;
5204 		config->hdr.reserved1 = 0;
5206 		bp->set_mac_pending++;
5209 		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5210 			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5211 			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5214 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5216 		bnx2x_set_eth_mac_addr_e1h(bp, 0);
/* E1H uses an MC hash instead of CAM entries -- clear it */
5218 		for (i = 0; i < MC_HASH_SIZE; i++)
5219 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5221 		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5224 	/* Clear iSCSI L2 MAC */
5225 	mutex_lock(&bp->cnic_mutex);
5226 	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5227 		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5228 		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5230 	mutex_unlock(&bp->cnic_mutex);
5233 	if (unload_mode == UNLOAD_NORMAL)
5234 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5236 	else if (bp->flags & NO_WOL_FLAG)
5237 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5240 		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5241 		u8 *mac_addr = bp->dev->dev_addr;
5243 		/* The mac address is written to entries 1-4 to
5244 		   preserve entry 0 which is used by the PMF */
5245 		u8 entry = (BP_E1HVN(bp) + 1)*8;
5247 		val = (mac_addr[0] << 8) | mac_addr[1];
5248 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5250 		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5251 		      (mac_addr[4] << 8) | mac_addr[5];
5252 		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5254 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5257 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5259 	/* Close multi and leading connections
5260 	   Completions for ramrods are collected in a synchronous way */
5261 	for_each_nondefault_queue(bp, i)
5262 		if (bnx2x_stop_multi(bp, i))
5265 	rc = bnx2x_stop_leading(bp);
5267 		BNX2X_ERR("Stop leading failed!\n");
5268 #ifdef BNX2X_STOP_ON_ERROR
/* with MCP: ask firmware for the reset scope; without: derive it from
 * the shared load counters */
5277 		reset_code = bnx2x_fw_command(bp, reset_code, 0);
5279 		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5280 		   load_count[0], load_count[1], load_count[2]);
5282 		load_count[1 + port]--;
5283 		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5284 		   load_count[0], load_count[1], load_count[2]);
5285 		if (load_count[0] == 0)
5286 			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5287 		else if (load_count[1 + port] == 0)
5288 			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5290 			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5293 	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5294 	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5295 		bnx2x__link_reset(bp);
5297 	/* Reset the chip */
5298 	bnx2x_reset_chip(bp, reset_code);
5300 	/* Report UNLOAD_DONE to MCP */
5302 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
/* Disable the HW "close the gates" mechanism: on E1 clear the per-port
 * AEU attention mask bits, on E1H clear the PXP/NIG close-mask bits in
 * the AEU general mask register. */
5306 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5310 	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5312 	if (CHIP_IS_E1(bp)) {
5313 		int port = BP_PORT(bp);
5314 		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5315 			MISC_REG_AEU_MASK_ATTN_FUNC_0;
5317 		val = REG_RD(bp, addr);
5319 		REG_WR(bp, addr, val);
5320 	} else if (CHIP_IS_E1H(bp)) {
5321 		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5322 		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5323 			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5324 		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5329 /* Close gates #2, #3 and #4: */
5330 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5334 /* Gates #2 and #4a are closed/opened for "not E1" only */
5335 if (!CHIP_IS_E1(bp)) {
5337 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5338 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5339 close ? (val | 0x1) : (val & (~(u32)1)));
5341 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5342 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5343 close ? (val | 0x1) : (val & (~(u32)1)));
5347 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5348 val = REG_RD(bp, addr);
5349 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5351 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5352 close ? "closing" : "opening");
5356 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
/* Save the current CLP `magic' bit into *magic_val and set it in the
 * shared MF config, so the MF configuration survives an MCP reset. */
5358 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5360 	/* Do some magic... */
5361 	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5362 	*magic_val = val & SHARED_MF_CLP_MAGIC;
5363 	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5366 /* Restore the value of the `magic' bit.
5368  * @param pdev Device handle.
5369  * @param magic_val Old value of the `magic' bit.
5371 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5373 	/* Restore the `magic' bit value... */
5374 	/* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5375 	   SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5376 		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5377 	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5378 	MF_CFG_WR(bp, shared_mf_config.clp_mb,
5379 		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5382 /* Prepares for MCP reset: takes care of CLP configurations.
5385  * @param magic_val Old value of 'magic' bit.
5387 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5390 	u32 validity_offset;
5392 	DP(NETIF_MSG_HW, "Starting\n");
5394 	/* Set `magic' bit in order to save MF config */
5395 	if (!CHIP_IS_E1(bp))
5396 		bnx2x_clp_reset_prep(bp, magic_val);
5398 	/* Get shmem offset */
5399 	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5400 	validity_offset = offsetof(struct shmem_region, validity_map[0]);
5402 	/* Clear validity map flags */
/* invalidates shmem so bnx2x_reset_mcp_comp() can detect MCP restart */
5404 		REG_WR(bp, shmem + validity_offset, 0);
5407 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5408 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
5410 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5411  * depending on the HW type.
/* one polling interval of the MCP-recovery wait loop */
5415 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5417 	/* special handling for emulation and FPGA,
5418 	   wait 10 times longer */
5419 	if (CHIP_REV_IS_SLOW(bp))
5420 		msleep(MCP_ONE_TIMEOUT*10);
5422 		msleep(MCP_ONE_TIMEOUT);
/* Complete an MCP reset: poll the shmem validity map (up to MCP_TIMEOUT)
 * until the DEV_INFO and MB signatures reappear -- which proves the MCP
 * came back up -- then restore the CLP `magic' bit saved earlier.
 * Errors out if shmem is unreadable or the signature never shows. */
5425 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5427 	u32 shmem, cnt, validity_offset, val;
5432 	/* Get shmem offset */
5433 	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5435 		BNX2X_ERR("Shmem 0 return failure\n");
5440 	validity_offset = offsetof(struct shmem_region, validity_map[0]);
5442 	/* Wait for MCP to come up */
5443 	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5444 		/* TBD: its best to check validity map of last port.
5445 		 * currently checks on port 0.
5447 		val = REG_RD(bp, shmem + validity_offset);
5448 		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5449 		   shmem + validity_offset, val);
5451 		/* check that shared memory is valid. */
5452 		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5453 		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5456 		bnx2x_mcp_wait_one(bp);
5459 	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5461 	/* Check that shared memory is valid. This indicates that MCP is up. */
5462 	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5463 	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5464 		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5470 	/* Restore the `magic' bit value */
5471 	if (!CHIP_IS_E1(bp))
5472 		bnx2x_clp_reset_done(bp, magic_val);
/* Prepare the PXP2 block for reset on non-E1 chips by clearing the
 * read-start/RBC-done/CFG-done handshake registers. */
5477 static void bnx2x_pxp_prep(struct bnx2x *bp)
5479 	if (!CHIP_IS_E1(bp)) {
5480 		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5481 		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5482 		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5488  * Reset the whole chip except for:
5490  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5493  *      - MISC (including AEU)
5497 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5499 	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
/* blocks in RESET_REG_1 that must survive the kill */
5502 		MISC_REGISTERS_RESET_REG_1_RST_HC |
5503 		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5504 		MISC_REGISTERS_RESET_REG_1_RST_PXP;
/* blocks in RESET_REG_2 that must survive the kill */
5507 		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5508 		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5509 		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5510 		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5511 		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5512 		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5513 		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5514 		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5516 	reset_mask1 = 0xffffffff;
5519 		reset_mask2 = 0xffff;
5521 		reset_mask2 = 0x1ffff;
/* assert reset on everything except the preserved blocks... */
5523 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5524 	       reset_mask1 & (~not_reset_mask1));
5525 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5526 	       reset_mask2 & (~not_reset_mask2));
/* ...then release all blocks from reset */
5531 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5532 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
/* Full "process kill" recovery: wait (up to ~1s) for the PXP2 Tetris
 * buffer and read queues to drain, close gates #2-#4, prepare the MCP
 * and PXP for reset, reset the chip, wait for the MCP to come back and
 * finally reopen the gates. Returns non-zero on failure. */
5536 static int bnx2x_process_kill(struct bnx2x *bp)
5540 	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5543 	/* Empty the Tetris buffer, wait for 1s */
5545 		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5546 		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5547 		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5548 		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5549 		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
/* idle signature: expected free-counter values and both ports idle */
5550 		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5551 		    ((port_is_idle_0 & 0x1) == 0x1) &&
5552 		    ((port_is_idle_1 & 0x1) == 0x1) &&
5553 		    (pgl_exp_rom2 == 0xffffffff))
5556 	} while (cnt-- > 0);
5559 		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5561 			  " outstanding read requests after 1s!\n");
5562 		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5563 			  " port_is_idle_0=0x%08x,"
5564 			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5565 			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5572 	/* Close gates #2, #3 and #4 */
5573 	bnx2x_set_234_gates(bp, true);
5575 	/* TBD: Indicate that "process kill" is in progress to MCP */
5577 	/* Clear "unprepared" bit */
5578 	REG_WR(bp, MISC_REG_UNPREPARED, 0);
5581 	/* Make sure all is written to the chip before the reset */
5584 	/* Wait for 1ms to empty GLUE and PCI-E core queues,
5585 	 * PSWHST, GRC and PSWRD Tetris buffer.
5589 	/* Prepare to chip reset: */
5591 	bnx2x_reset_mcp_prep(bp, &val);
5597 	/* reset the chip */
5598 	bnx2x_process_kill_chip_reset(bp);
5601 	/* Recover after reset: */
5603 	if (bnx2x_reset_mcp_comp(bp, val))
5609 	/* Open the gates #2, #3 and #4 */
5610 	bnx2x_set_234_gates(bp, false);
5612 	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5613 	 * reset state, re-enable attentions. */
/* Leader's part of parity recovery: run the "process kill" sequence,
 * and on success mark the global reset done, set the recovery state to
 * DONE and release the leader (RESERVED_08) HW lock. */
5618 static int bnx2x_leader_reset(struct bnx2x *bp)
5621 	/* Try to recover after the failure */
5622 	if (bnx2x_process_kill(bp)) {
5623 		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
5626 		goto exit_leader_reset;
5629 	/* Clear "reset is in progress" bit and update the driver state */
5630 	bnx2x_set_reset_done(bp);
5631 	bp->recovery_state = BNX2X_RECOVERY_DONE;
5635 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5640 /* Assumption: runs under rtnl lock. This together with the fact
5641  * that it's called only from bnx2x_reset_task() ensure that it
5642  * will never be called when netif_running(bp->dev) is false.
/* Parity-error recovery state machine. The first function to grab the
 * RESERVED_08 HW lock becomes the "leader": it waits for all other
 * functions to unload, performs the global chip reset and reloads.
 * Non-leaders wait for the leader to finish (or take over leadership if
 * the former leader disappeared) and then reload themselves. */
5644 static void bnx2x_parity_recover(struct bnx2x *bp)
5646 	DP(NETIF_MSG_HW, "Handling parity\n");
5648 		switch (bp->recovery_state) {
5649 		case BNX2X_RECOVERY_INIT:
5650 			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5651 			/* Try to get a LEADER_LOCK HW lock */
5652 			if (bnx2x_trylock_hw_lock(bp,
5653 				HW_LOCK_RESOURCE_RESERVED_08))
5656 			/* Stop the driver */
5657 			/* If interface has been removed - break */
5658 			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5661 			bp->recovery_state = BNX2X_RECOVERY_WAIT;
5662 			/* Ensure "is_leader" and "recovery_state"
5663 			 *  update values are seen on other CPUs
5668 		case BNX2X_RECOVERY_WAIT:
5669 			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5670 			if (bp->is_leader) {
5671 				u32 load_counter = bnx2x_get_load_cnt(bp);
5673 					/* Wait until all other functions get
5676 					schedule_delayed_work(&bp->reset_task,
5680 					/* If all other functions got down -
5681 					 * try to bring the chip back to
5682 					 * normal. In any case it's an exit
5683 					 * point for a leader.
5685 					if (bnx2x_leader_reset(bp) ||
5686 					bnx2x_nic_load(bp, LOAD_NORMAL)) {
5687 						printk(KERN_ERR"%s: Recovery "
5688 						"has failed. Power cycle is "
5689 						"needed.\n", bp->dev->name);
5690 						/* Disconnect this device */
5691 						netif_device_detach(bp->dev);
5692 						/* Block ifup for all function
5693 						 * of this ASIC until
5694 						 * "process kill" or power
5697 						bnx2x_set_reset_in_progress(bp);
5698 						/* Shut down the power */
5699 						bnx2x_set_power_state(bp,
5706 			} else { /* non-leader */
5707 				if (!bnx2x_reset_is_done(bp)) {
5708 					/* Try to get a LEADER_LOCK HW lock as
5709 					 * long as a former leader may have
5710 					 * been unloaded by the user or
5711 					 * released a leadership by another
5714 					if (bnx2x_trylock_hw_lock(bp,
5715 					HW_LOCK_RESOURCE_RESERVED_08)) {
5716 						/* I'm a leader now! Restart a
5723 					schedule_delayed_work(&bp->reset_task,
5727 				} else { /* A leader has completed
5728 					  * the "process kill". It's an exit
5729 					  * point for a non-leader.
5731 					bnx2x_nic_load(bp, LOAD_NORMAL);
5732 					bp->recovery_state =
5733 						BNX2X_RECOVERY_DONE;
5744 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5745  * scheduled on a general queue in order to prevent a dead lock.
/* Delayed-work handler: either continue parity recovery (if one is in
 * progress) or do a plain unload/reload of the NIC. Compiled as a no-op
 * (with a warning) when BNX2X_STOP_ON_ERROR is defined, to preserve the
 * error state for debugging. */
5747 static void bnx2x_reset_task(struct work_struct *work)
5749 	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5751 #ifdef BNX2X_STOP_ON_ERROR
5752 	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5753 		  " so reset not done to allow debug dump,\n"
5754 	 KERN_ERR " you will need to reboot when done\n");
5760 	if (!netif_running(bp->dev))
5761 		goto reset_task_exit;
5763 	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5764 		bnx2x_parity_recover(bp);
5766 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5767 		bnx2x_nic_load(bp, LOAD_NORMAL);
5774 /* end of nic load/unload */
5777  * Init service functions
/* Map a function index (0-7) to its PXP2 PGL "pretend" register; logs
 * an error for out-of-range indices. */
5780 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5783 	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5784 	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5785 	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5786 	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5787 	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5788 	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5789 	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5790 	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5792 		BNX2X_ERR("Unsupported function index: %d\n", func);
/* Disable HC interrupts on an E1H chip by temporarily "pretending" to
 * be function 0 (UNDI runs in like-E1 mode), then restoring the
 * original function in the pretend register. Verifies each pretend
 * register update took effect. */
5797 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5799 	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5801 	/* Flush all outstanding writes */
5804 	/* Pretend to be function 0 */
5806 	/* Flush the GRC transaction (in the chip) */
5807 	new_val = REG_RD(bp, reg);
5809 		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5814 	/* From now we are in the "like-E1" mode */
5815 	bnx2x_int_disable(bp);
5817 	/* Flush all outstanding writes */
5820 	/* Restore the original function settings */
5821 	REG_WR(bp, reg, orig_func);
5822 	new_val = REG_RD(bp, reg);
5823 	if (new_val != orig_func) {
5824 		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5825 			  orig_func, new_val);
/* Chip-type dispatch for UNDI interrupt disable: E1H needs the
 * pretend-register dance, E1 can disable interrupts directly. */
5830 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5832 	if (CHIP_IS_E1H(bp))
5833 		bnx2x_undi_int_disable_e1h(bp, func);
5835 		bnx2x_int_disable(bp);
/* Detect a leftover pre-boot UNDI driver (normal-bell CID offset == 7)
 * and cleanly take over: unload UNDI via MCP UNLOAD_REQ/DONE handshakes
 * on both ports if needed, disable its interrupts, block RX into the
 * BRB, reset everything except the PXP (preserving NIG port-swap
 * straps) and restore our own fw_seq. Runs under the UNDI HW lock. */
5838 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5842 	/* Check if there is any driver already loaded */
5843 	val = REG_RD(bp, MISC_REG_UNPREPARED);
5845 		/* Check if it is the UNDI driver
5846 		 * UNDI driver initializes CID offset for normal bell to 0x7
5848 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5849 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5851 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5853 			int func = BP_FUNC(bp);
5857 			/* clear the UNDI indication */
5858 			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5860 			BNX2X_DEV_INFO("UNDI is active! reset device\n");
5862 			/* try unload UNDI on port 0 */
5865 				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5866 				 DRV_MSG_SEQ_NUMBER_MASK);
5867 			reset_code = bnx2x_fw_command(bp, reset_code, 0);
5869 			/* if UNDI is loaded on the other port */
5870 			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5872 				/* send "DONE" for previous unload */
5873 				bnx2x_fw_command(bp,
5874 						 DRV_MSG_CODE_UNLOAD_DONE, 0);
5876 				/* unload UNDI on port 1 */
5879 					(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5880 					 DRV_MSG_SEQ_NUMBER_MASK);
5881 				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5883 				bnx2x_fw_command(bp, reset_code, 0);
5886 			/* now it's safe to release the lock */
5887 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5889 			bnx2x_undi_int_disable(bp, func);
5891 			/* close input traffic and wait for it */
5892 			/* Do not rcv packets to BRB */
5894 			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5895 					  NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5896 			/* Do not direct rcv packets that are not for MCP to
5899 			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5900 					  NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5903 			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5904 					  MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5907 			/* save NIG port swap info */
5908 			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5909 			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5912 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5915 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5917 			/* take the NIG out of reset and restore swap values */
5919 			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5920 			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
5921 			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5922 			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5924 			/* send unload done to the MCP */
5925 			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5927 			/* restore our func and fw_seq */
5930 				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5931 				 DRV_MSG_SEQ_NUMBER_MASK);
5934 			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
/* Probe-time discovery of chip-common info: chip ID (num/rev/metal/
 * bond), single-port strap, flash size, shmem base addresses, MCP
 * presence/validity, LED mode, feature flags, bootcode version, WoL
 * capability (vn0 only) and the board part number. Results land in
 * bp->common, bp->link_params and bp->flags. */
5938 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5940 	u32 val, val2, val3, val4, id;
5943 	/* Get the chip revision id and number. */
5944 	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5945 	val = REG_RD(bp, MISC_REG_CHIP_NUM);
5946 	id = ((val & 0xffff) << 16);
5947 	val = REG_RD(bp, MISC_REG_CHIP_REV);
5948 	id |= ((val & 0xf) << 12);
5949 	val = REG_RD(bp, MISC_REG_CHIP_METAL);
5950 	id |= ((val & 0xff) << 4);
5951 	val = REG_RD(bp, MISC_REG_BOND_ID);
5953 	bp->common.chip_id = id;
5954 	bp->link_params.chip_id = bp->common.chip_id;
5955 	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
/* detect single-port boards from the bond strap register */
5957 	val = (REG_RD(bp, 0x2874) & 0x55);
5958 	if ((bp->common.chip_id & 0x1) ||
5959 	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5960 		bp->flags |= ONE_PORT_FLAG;
5961 		BNX2X_DEV_INFO("single port device\n");
5964 	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5965 	bp->common.flash_size = (NVRAM_1MB_SIZE <<
5966 				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5967 	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5968 		       bp->common.flash_size, bp->common.flash_size);
5970 	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5971 	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5972 	bp->link_params.shmem_base = bp->common.shmem_base;
5973 	bp->link_params.shmem2_base = bp->common.shmem2_base;
5974 	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
5975 		       bp->common.shmem_base, bp->common.shmem2_base);
/* shmem base outside [0xA0000, 0xC0000) means no running MCP */
5977 	if (!bp->common.shmem_base ||
5978 	    (bp->common.shmem_base < 0xA0000) ||
5979 	    (bp->common.shmem_base >= 0xC0000)) {
5980 		BNX2X_DEV_INFO("MCP not active\n");
5981 		bp->flags |= NO_MCP_FLAG;
5985 	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5986 	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5987 		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5988 		BNX2X_ERROR("BAD MCP validity signature\n");
5990 	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5991 	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
5993 	bp->link_params.hw_led_mode = ((bp->common.hw_config &
5994 					SHARED_HW_CFG_LED_MODE_MASK) >>
5995 				       SHARED_HW_CFG_LED_MODE_SHIFT);
5997 	bp->link_params.feature_config_flags = 0;
5998 	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
5999 	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6000 		bp->link_params.feature_config_flags |=
6001 				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6003 		bp->link_params.feature_config_flags &=
6004 				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6006 	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6007 	bp->common.bc_ver = val;
6008 	BNX2X_DEV_INFO("bc_ver %X\n", val);
6009 	if (val < BNX2X_BC_VER) {
6010 		/* for now only warn
6011 		 * later we might need to enforce this */
6012 		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6013 			    "please upgrade BC\n", BNX2X_BC_VER, val);
6015 	bp->link_params.feature_config_flags |=
6016 		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6017 		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6018 	bp->link_params.feature_config_flags |=
6019 		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6020 		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
/* only vn0 of an E1H can use PME_D3cold-based WoL */
6022 	if (BP_E1HVN(bp) == 0) {
6023 		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6024 		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6026 		/* no WOL capability for E1HVN != 0 */
6027 		bp->flags |= NO_WOL_FLAG;
6029 	BNX2X_DEV_INFO("%sWoL capable\n",
6030 		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
6032 	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6033 	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6034 	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6035 	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6037 	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6038 		 val, val2, val3, val4);
/* Build bp->port.supported[] from the configured PHY(s) (taking the
 * PHY-swap strap into account for dual external PHYs), read the PHY MDIO
 * address according to switch_cfg, then mask out every speed the NVRAM
 * speed_cap_mask[] disables. Errors out if no PHY reports any
 * capability. */
6041 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6044 	int cfg_size = 0, idx, port = BP_PORT(bp);
6046 	/* Aggregation of supported attributes of all external phys */
6047 	bp->port.supported[0] = 0;
6048 	bp->port.supported[1] = 0;
6049 	switch (bp->link_params.num_phys) {
6051 		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6055 		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
/* two external PHYs: the swap strap decides which maps to index 0 */
6059 		if (bp->link_params.multi_phy_config &
6060 		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6061 			bp->port.supported[1] =
6062 				bp->link_params.phy[EXT_PHY1].supported;
6063 			bp->port.supported[0] =
6064 				bp->link_params.phy[EXT_PHY2].supported;
6066 			bp->port.supported[0] =
6067 				bp->link_params.phy[EXT_PHY1].supported;
6068 			bp->port.supported[1] =
6069 				bp->link_params.phy[EXT_PHY2].supported;
6075 	if (!(bp->port.supported[0] || bp->port.supported[1])) {
6076 		BNX2X_ERR("NVRAM config error. BAD phy config."
6077 			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
6079 			   dev_info.port_hw_config[port].external_phy_config),
6081 			   dev_info.port_hw_config[port].external_phy_config2));
6085 	switch (switch_cfg) {
6087 		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6089 		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6092 	case SWITCH_CFG_10G:
6093 		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6095 		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6100 		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6101 			  bp->port.link_config[0]);
6104 	/* mask what we support according to speed_cap_mask per configuration */
6105 	for (idx = 0; idx < cfg_size; idx++) {
6106 		if (!(bp->link_params.speed_cap_mask[idx] &
6107 				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6108 			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
6110 		if (!(bp->link_params.speed_cap_mask[idx] &
6111 				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6112 			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
6114 		if (!(bp->link_params.speed_cap_mask[idx] &
6115 				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6116 			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
6118 		if (!(bp->link_params.speed_cap_mask[idx] &
6119 				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6120 			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
6122 		if (!(bp->link_params.speed_cap_mask[idx] &
6123 					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6124 			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6125 						     SUPPORTED_1000baseT_Full);
6127 		if (!(bp->link_params.speed_cap_mask[idx] &
6128 					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6129 			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
6131 		if (!(bp->link_params.speed_cap_mask[idx] &
6132 					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6133 			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6137 	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6138 		       bp->port.supported[1]);
6141 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6143 u32 link_config, idx, cfg_size = 0;
6144 bp->port.advertising[0] = 0;
6145 bp->port.advertising[1] = 0;
6146 switch (bp->link_params.num_phys) {
6155 for (idx = 0; idx < cfg_size; idx++) {
6156 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6157 link_config = bp->port.link_config[idx];
6158 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6159 case PORT_FEATURE_LINK_SPEED_AUTO:
6160 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6161 bp->link_params.req_line_speed[idx] =
6163 bp->port.advertising[idx] |=
6164 bp->port.supported[idx];
6166 /* force 10G, no AN */
6167 bp->link_params.req_line_speed[idx] =
6169 bp->port.advertising[idx] |=
6170 (ADVERTISED_10000baseT_Full |
6176 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6177 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6178 bp->link_params.req_line_speed[idx] =
6180 bp->port.advertising[idx] |=
6181 (ADVERTISED_10baseT_Full |
6184 BNX2X_ERROR("NVRAM config error. "
6185 "Invalid link_config 0x%x"
6186 " speed_cap_mask 0x%x\n",
6188 bp->link_params.speed_cap_mask[idx]);
6193 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6194 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6195 bp->link_params.req_line_speed[idx] =
6197 bp->link_params.req_duplex[idx] =
6199 bp->port.advertising[idx] |=
6200 (ADVERTISED_10baseT_Half |
6203 BNX2X_ERROR("NVRAM config error. "
6204 "Invalid link_config 0x%x"
6205 " speed_cap_mask 0x%x\n",
6207 bp->link_params.speed_cap_mask[idx]);
6212 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6213 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6214 bp->link_params.req_line_speed[idx] =
6216 bp->port.advertising[idx] |=
6217 (ADVERTISED_100baseT_Full |
6220 BNX2X_ERROR("NVRAM config error. "
6221 "Invalid link_config 0x%x"
6222 " speed_cap_mask 0x%x\n",
6224 bp->link_params.speed_cap_mask[idx]);
6229 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6230 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6231 bp->link_params.req_line_speed[idx] = SPEED_100;
6232 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6233 bp->port.advertising[idx] |=
6234 (ADVERTISED_100baseT_Half |
6237 BNX2X_ERROR("NVRAM config error. "
6238 "Invalid link_config 0x%x"
6239 " speed_cap_mask 0x%x\n",
6241 bp->link_params.speed_cap_mask[idx]);
6246 case PORT_FEATURE_LINK_SPEED_1G:
6247 if (bp->port.supported[idx] &
6248 SUPPORTED_1000baseT_Full) {
6249 bp->link_params.req_line_speed[idx] =
6251 bp->port.advertising[idx] |=
6252 (ADVERTISED_1000baseT_Full |
6255 BNX2X_ERROR("NVRAM config error. "
6256 "Invalid link_config 0x%x"
6257 " speed_cap_mask 0x%x\n",
6259 bp->link_params.speed_cap_mask[idx]);
6264 case PORT_FEATURE_LINK_SPEED_2_5G:
6265 if (bp->port.supported[idx] &
6266 SUPPORTED_2500baseX_Full) {
6267 bp->link_params.req_line_speed[idx] =
6269 bp->port.advertising[idx] |=
6270 (ADVERTISED_2500baseX_Full |
6273 BNX2X_ERROR("NVRAM config error. "
6274 "Invalid link_config 0x%x"
6275 " speed_cap_mask 0x%x\n",
6277 bp->link_params.speed_cap_mask[idx]);
6282 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6283 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6284 case PORT_FEATURE_LINK_SPEED_10G_KR:
6285 if (bp->port.supported[idx] &
6286 SUPPORTED_10000baseT_Full) {
6287 bp->link_params.req_line_speed[idx] =
6289 bp->port.advertising[idx] |=
6290 (ADVERTISED_10000baseT_Full |
6293 BNX2X_ERROR("NVRAM config error. "
6294 "Invalid link_config 0x%x"
6295 " speed_cap_mask 0x%x\n",
6297 bp->link_params.speed_cap_mask[idx]);
6303 BNX2X_ERROR("NVRAM config error. "
6304 "BAD link speed link_config 0x%x\n",
6306 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
6307 bp->port.advertising[idx] = bp->port.supported[idx];
6311 bp->link_params.req_flow_ctrl[idx] = (link_config &
6312 PORT_FEATURE_FLOW_CONTROL_MASK);
6313 if ((bp->link_params.req_flow_ctrl[idx] ==
6314 BNX2X_FLOW_CTRL_AUTO) &&
6315 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6316 bp->link_params.req_flow_ctrl[idx] =
6317 BNX2X_FLOW_CTRL_NONE;
6320 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
6321 " 0x%x advertising 0x%x\n",
6322 bp->link_params.req_line_speed[idx],
6323 bp->link_params.req_duplex[idx],
6324 bp->link_params.req_flow_ctrl[idx],
6325 bp->port.advertising[idx]);
/*
 * Write a 6-byte MAC address into mac_buf in big-endian (network) order:
 * the two bytes of mac_hi followed by the four bytes of mac_lo.
 */
6329 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6331 mac_hi = cpu_to_be16(mac_hi);
6332 mac_lo = cpu_to_be32(mac_lo);
6333 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6334 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
/*
 * Read the per-port hardware configuration from shared memory (SHMEM):
 * lane config, speed capability masks, link configs, multi-PHY config,
 * WoL default, switch type, MDIO PHY address and the port/iSCSI MACs.
 *
 * Fixes: stray double semicolon in the local declaration, and a missing
 * separator in the BNX2X_DEV_INFO format string ("...0x%08x" previously ran
 * straight into "speed_cap_mask0").
 */
6337 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6339 int port = BP_PORT(bp);
6342 u32 ext_phy_type, ext_phy_config;
6344 bp->link_params.bp = bp;
6345 bp->link_params.port = port;
6347 bp->link_params.lane_config =
6348 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6350 bp->link_params.speed_cap_mask[0] =
6352 dev_info.port_hw_config[port].speed_capability_mask);
6353 bp->link_params.speed_cap_mask[1] =
6355 dev_info.port_hw_config[port].speed_capability_mask2);
6356 bp->port.link_config[0] =
6357 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6359 bp->port.link_config[1] =
6360 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
6362 bp->link_params.multi_phy_config =
6363 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
6364 /* If the device is capable of WoL, set the default state according
6367 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6368 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6369 (config & PORT_FEATURE_WOL_ENABLED));
6371 BNX2X_DEV_INFO("lane_config 0x%08x "
6372 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
6373 bp->link_params.lane_config,
6374 bp->link_params.speed_cap_mask[0],
6375 bp->port.link_config[0]);
6377 bp->link_params.switch_cfg = (bp->port.link_config[0] &
6378 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6379 bnx2x_phy_probe(&bp->link_params);
6380 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6382 bnx2x_link_settings_requested(bp);
6385 * If connected directly, work with the internal PHY, otherwise, work
6386 * with the external PHY
6390 dev_info.port_hw_config[port].external_phy_config);
6391 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6392 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6393 bp->mdio.prtad = bp->port.phy_addr;
6395 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6396 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6398 XGXS_EXT_PHY_ADDR(ext_phy_config);
/* primary port MAC: also mirrored into link_params and perm_addr */
6400 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6401 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6402 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6403 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6404 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/* separate MAC used for iSCSI offload */
6407 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6408 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6409 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
/*
 * Gather all hardware info for this function: common (chip-wide) info,
 * E1H multi-function (E1HOV/VLAN tag) configuration, per-port info, the
 * firmware mailbox sequence number and the function's MAC address.
 * Returns 0 on success; error paths for invalid MF config are elided here.
 */
6413 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6415 int func = BP_FUNC(bp);
6419 bnx2x_get_common_hwinfo(bp);
/* E1H with an active MCP: determine single- vs multi-function mode */
6423 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6425 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6427 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6428 FUNC_MF_CFG_E1HOV_TAG_MASK);
6429 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6431 BNX2X_DEV_INFO("%s function mode\n",
6432 IS_E1HMF(bp) ? "multi" : "single")
6435 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6437 FUNC_MF_CFG_E1HOV_TAG_MASK);
6438 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6440 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6442 func, bp->e1hov, bp->e1hov);
6444 BNX2X_ERROR("No valid E1HOV for func %d,"
6445 " aborting\n", func);
6450 BNX2X_ERROR("VN %d in single function mode,"
6451 " aborting\n", BP_E1HVN(bp));
6457 if (!BP_NOMCP(bp)) {
6458 bnx2x_get_port_hwinfo(bp);
6460 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6461 DRV_MSG_SEQ_NUMBER_MASK);
6462 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
/* in MF mode the per-function MAC overrides the port MAC (if valid) */
6466 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6467 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6468 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6469 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6470 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6471 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6472 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6473 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6474 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6475 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6476 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6478 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6486 /* only supposed to happen on emulation/FPGA */
6487 BNX2X_ERROR("warning: random MAC workaround active\n");
6488 random_ether_addr(bp->dev->dev_addr);
6489 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
/*
 * Read the PCI Vital Product Data (VPD) and, on Dell-branded boards,
 * extract the vendor-specific firmware/version string into bp->fw_ver.
 * Silently leaves bp->fw_ver zeroed on any parse failure.
 */
6495 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6497 int cnt, i, block_end, rodi;
6498 char vpd_data[BNX2X_VPD_LEN+1];
6499 char str_id_reg[VENDOR_ID_LEN+1];
6500 char str_id_cap[VENDOR_ID_LEN+1];
6503 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6504 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6506 if (cnt < BNX2X_VPD_LEN)
/* locate the large-resource read-only data tag in the VPD image */
6509 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6510 PCI_VPD_LRDT_RO_DATA);
6515 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6516 pci_vpd_lrdt_size(&vpd_data[i]);
6518 i += PCI_VPD_LRDT_TAG_SIZE;
6520 if (block_end > BNX2X_VPD_LEN)
6523 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6524 PCI_VPD_RO_KEYWORD_MFR_ID);
6528 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6530 if (len != VENDOR_ID_LEN)
6533 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6535 /* vendor specific info */
6536 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6537 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
/* match the manufacturer ID in either hex case against Dell's vendor ID */
6538 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6539 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6541 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6542 PCI_VPD_RO_KEYWORD_VENDOR0);
6544 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6546 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
/* bounds-checked copy of the vendor string into fw_ver */
6548 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6549 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6550 bp->fw_ver[len] = ' ';
/*
 * One-time driver-state initialization for a newly probed device:
 * locks, work items, hardware/firmware info, module-parameter derived
 * settings (multi-queue, TPA/LRO, dropless FC), ring sizes, coalescing
 * ticks and the periodic timer.  Returns 0 on success.
 */
6559 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6561 int func = BP_FUNC(bp);
6565 /* Disable interrupt handling until HW is initialized */
6566 atomic_set(&bp->intr_sem, 1);
6567 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6569 mutex_init(&bp->port.phy_mutex);
6570 mutex_init(&bp->fw_mb_mutex);
6571 spin_lock_init(&bp->stats_lock);
6573 mutex_init(&bp->cnic_mutex);
6576 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6577 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6579 rc = bnx2x_get_hwinfo(bp);
6581 bnx2x_read_fwinfo(bp);
6582 /* need to reset chip if undi was active */
6584 bnx2x_undi_unload(bp);
6586 if (CHIP_REV_IS_FPGA(bp))
6587 dev_err(&bp->pdev->dev, "FPGA detected\n");
6589 if (BP_NOMCP(bp) && (func == 0))
6590 dev_err(&bp->pdev->dev, "MCP disabled, "
6591 "must load devices in order!\n");
6593 /* Set multi queue mode */
6594 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6595 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6596 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6597 "requested is not MSI-X\n");
6598 multi_mode = ETH_RSS_MODE_DISABLED;
6600 bp->multi_mode = multi_mode;
6601 bp->int_mode = int_mode;
6603 bp->dev->features |= NETIF_F_GRO;
/* TPA (LRO) enable/disable driven by the disable_tpa module parameter */
6607 bp->flags &= ~TPA_ENABLE_FLAG;
6608 bp->dev->features &= ~NETIF_F_LRO;
6610 bp->flags |= TPA_ENABLE_FLAG;
6611 bp->dev->features |= NETIF_F_LRO;
6613 bp->disable_tpa = disable_tpa;
6616 bp->dropless_fc = 0;
6618 bp->dropless_fc = dropless_fc;
6622 bp->tx_ring_size = MAX_TX_AVAIL;
6623 bp->rx_ring_size = MAX_RX_AVAIL;
6627 /* make sure that the numbers are in the right granularity */
6628 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6629 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
/* slow (emulation) chips get a 5x longer timer period; poll overrides */
6631 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6632 bp->current_interval = (poll ? poll : timer_interval);
6634 init_timer(&bp->timer);
6635 bp->timer.expires = jiffies + bp->current_interval;
6636 bp->timer.data = (unsigned long) bp;
6637 bp->timer.function = bnx2x_timer;
6643 /****************************************************************************
6644 * General service functions
6645 ****************************************************************************/
6647 /* called with rtnl_lock */
/*
 * ndo_open: power the device up and load the NIC.  If a previous parity
 * error/reset has not completed, attempt to recover (as "leader") before
 * loading; otherwise refuse and ask the user to retry.
 */
6648 static int bnx2x_open(struct net_device *dev)
6650 struct bnx2x *bp = netdev_priv(dev);
6652 netif_carrier_off(dev);
6654 bnx2x_set_power_state(bp, PCI_D0);
6656 if (!bnx2x_reset_is_done(bp)) {
6658 /* Reset MCP mail box sequence if there is ongoing
6663 /* If it's the first function to load and reset done
6664 * is still not cleared it may mean that. We don't
6665 * check the attention state here because it may have
6666 * already been cleared by a "common" reset but we
6667 * shall proceed with "process kill" anyway.
6669 if ((bnx2x_get_load_cnt(bp) == 0) &&
6670 bnx2x_trylock_hw_lock(bp,
6671 HW_LOCK_RESOURCE_RESERVED_08) &&
6672 (!bnx2x_leader_reset(bp))) {
6673 DP(NETIF_MSG_HW, "Recovered in open\n");
/* recovery failed or not ours to run: power back down and bail out */
6677 bnx2x_set_power_state(bp, PCI_D3hot);
6679 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6680 " completed yet. Try again later. If u still see this"
6681 " message after a few retries then power cycle is"
6682 " required.\n", bp->dev->name);
6688 bp->recovery_state = BNX2X_RECOVERY_DONE;
6690 return bnx2x_nic_load(bp, LOAD_OPEN);
6693 /* called with rtnl_lock */
/* ndo_stop: unload the NIC (releasing IRQs) and drop to low power. */
6694 static int bnx2x_close(struct net_device *dev)
6696 struct bnx2x *bp = netdev_priv(dev);
6698 /* Unload the driver, release IRQs */
6699 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6700 bnx2x_set_power_state(bp, PCI_D3hot);
6705 /* called with netif_tx_lock from dev_mcast.c */
/*
 * Program the RX filtering mode from dev->flags and the multicast list:
 * promiscuous, all-multi, or an explicit multicast filter.  On E1 the
 * multicast MACs go into the CAM via a SET_MAC ramrod; on later chips a
 * 256-bit CRC-based hash filter is written to the MC_HASH registers.
 */
6706 void bnx2x_set_rx_mode(struct net_device *dev)
6708 struct bnx2x *bp = netdev_priv(dev);
6709 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6710 int port = BP_PORT(bp);
6712 if (bp->state != BNX2X_STATE_OPEN) {
6713 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6717 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6719 if (dev->flags & IFF_PROMISC)
6720 rx_mode = BNX2X_RX_MODE_PROMISC;
6722 else if ((dev->flags & IFF_ALLMULTI) ||
6723 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6725 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6727 else { /* some multicasts */
6728 if (CHIP_IS_E1(bp)) {
/* E1: fill a CAM configuration command, one entry per multicast MAC */
6730 struct netdev_hw_addr *ha;
6731 struct mac_configuration_cmd *config =
6732 bnx2x_sp(bp, mcast_config);
6735 netdev_for_each_mc_addr(ha, dev) {
6736 config->config_table[i].
6737 cam_entry.msb_mac_addr =
6738 swab16(*(u16 *)&ha->addr[0]);
6739 config->config_table[i].
6740 cam_entry.middle_mac_addr =
6741 swab16(*(u16 *)&ha->addr[2]);
6742 config->config_table[i].
6743 cam_entry.lsb_mac_addr =
6744 swab16(*(u16 *)&ha->addr[4]);
6745 config->config_table[i].cam_entry.flags =
6747 config->config_table[i].
6748 target_table_entry.flags = 0;
6749 config->config_table[i].target_table_entry.
6750 clients_bit_vector =
6751 cpu_to_le32(1 << BP_L_ID(bp));
6752 config->config_table[i].
6753 target_table_entry.vlan_id = 0;
6756 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6757 config->config_table[i].
6758 cam_entry.msb_mac_addr,
6759 config->config_table[i].
6760 cam_entry.middle_mac_addr,
6761 config->config_table[i].
6762 cam_entry.lsb_mac_addr);
/* invalidate any CAM entries left over from a previous, longer list */
6765 old = config->hdr.length;
6767 for (; i < old; i++) {
6768 if (CAM_IS_INVALID(config->
6770 /* already invalidated */
6774 CAM_INVALIDATE(config->
6779 if (CHIP_REV_IS_SLOW(bp))
6780 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6782 offset = BNX2X_MAX_MULTICAST*(1 + port);
6784 config->hdr.length = i;
6785 config->hdr.offset = offset;
6786 config->hdr.client_id = bp->fp->cl_id;
6787 config->hdr.reserved1 = 0;
6789 bp->set_mac_pending++;
/* post the SET_MAC ramrod carrying the CAM table via the slowpath queue */
6792 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6793 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6794 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6797 /* Accept one or more multicasts */
6798 struct netdev_hw_addr *ha;
6799 u32 mc_filter[MC_HASH_SIZE];
6800 u32 crc, bit, regidx;
6803 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6805 netdev_for_each_mc_addr(ha, dev) {
6806 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
/* hash each MAC: top CRC32c byte selects the filter bit */
6809 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6810 bit = (crc >> 24) & 0xff;
6813 mc_filter[regidx] |= (1 << bit);
6816 for (i = 0; i < MC_HASH_SIZE; i++)
6817 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6822 bp->rx_mode = rx_mode;
6823 bnx2x_set_storm_rx_mode(bp);
6827 /* called with rtnl_lock */
/*
 * mdio_read callback for the mdio_if_info interface: read one PHY
 * register under the PHY lock.  Return value handling is elided in this
 * excerpt; reads go through bnx2x_phy_read().
 */
6828 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6829 int devad, u16 addr)
6831 struct bnx2x *bp = netdev_priv(netdev);
6835 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6836 prtad, devad, addr);
6838 /* The HW expects different devad if CL22 is used */
6839 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6841 bnx2x_acquire_phy_lock(bp);
6842 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
6843 bnx2x_release_phy_lock(bp);
6844 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6851 /* called with rtnl_lock */
/*
 * mdio_write callback for the mdio_if_info interface: write one PHY
 * register under the PHY lock via bnx2x_phy_write().
 */
6852 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6853 u16 addr, u16 value)
6855 struct bnx2x *bp = netdev_priv(netdev);
6858 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6859 " value 0x%x\n", prtad, devad, addr, value);
6861 /* The HW expects different devad if CL22 is used */
6862 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6864 bnx2x_acquire_phy_lock(bp);
6865 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
6866 bnx2x_release_phy_lock(bp);
6870 /* called with rtnl_lock */
/*
 * ndo_do_ioctl: forward MII ioctls to the generic MDIO layer
 * (mdio_mii_ioctl), which dispatches to the read/write callbacks above.
 * Requires the interface to be up.
 */
6871 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6873 struct bnx2x *bp = netdev_priv(dev);
6874 struct mii_ioctl_data *mdio = if_mii(ifr);
6876 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6877 mdio->phy_id, mdio->reg_num, mdio->val_in);
6879 if (!netif_running(dev))
6882 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
6885 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll controller: run the interrupt handler with the device IRQ
 * disabled so netconsole et al. can make progress without interrupts.
 */
6886 static void poll_bnx2x(struct net_device *dev)
6888 struct bnx2x *bp = netdev_priv(dev);
6890 disable_irq(bp->pdev->irq);
6891 bnx2x_interrupt(bp->pdev->irq, dev);
6892 enable_irq(bp->pdev->irq);
/* net_device callbacks wired into the networking core at probe time */
6896 static const struct net_device_ops bnx2x_netdev_ops = {
6897 .ndo_open = bnx2x_open,
6898 .ndo_stop = bnx2x_close,
6899 .ndo_start_xmit = bnx2x_start_xmit,
6900 .ndo_set_multicast_list = bnx2x_set_rx_mode,
6901 .ndo_set_mac_address = bnx2x_change_mac_addr,
6902 .ndo_validate_addr = eth_validate_addr,
6903 .ndo_do_ioctl = bnx2x_ioctl,
6904 .ndo_change_mtu = bnx2x_change_mtu,
6905 .ndo_tx_timeout = bnx2x_tx_timeout,
6907 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
6909 #ifdef CONFIG_NET_POLL_CONTROLLER
6910 .ndo_poll_controller = poll_bnx2x,
/*
 * PCI-level device initialization: enable the device, claim BARs, set up
 * DMA masks, map the register (BAR0) and doorbell (BAR2) windows, clean
 * chip indirect-address state, and populate net_device features and MDIO
 * callbacks.  Error paths unwind via the goto labels at the bottom.
 */
6914 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6915 struct net_device *dev)
6920 SET_NETDEV_DEV(dev, &pdev->dev);
6921 bp = netdev_priv(dev);
6926 bp->func = PCI_FUNC(pdev->devfn);
6928 rc = pci_enable_device(pdev);
6930 dev_err(&bp->pdev->dev,
6931 "Cannot enable PCI device, aborting\n");
/* both BAR0 (registers) and BAR2 (doorbells) must be memory BARs */
6935 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6936 dev_err(&bp->pdev->dev,
6937 "Cannot find PCI device base address, aborting\n");
6939 goto err_out_disable;
6942 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
6943 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
6944 " base address, aborting\n");
6946 goto err_out_disable;
/* only the first function to enable the device claims the regions */
6949 if (atomic_read(&pdev->enable_cnt) == 1) {
6950 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6952 dev_err(&bp->pdev->dev,
6953 "Cannot obtain PCI resources, aborting\n");
6954 goto err_out_disable;
6957 pci_set_master(pdev);
6958 pci_save_state(pdev);
6961 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6962 if (bp->pm_cap == 0) {
6963 dev_err(&bp->pdev->dev,
6964 "Cannot find power management capability, aborting\n");
6966 goto err_out_release;
6969 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
6970 if (bp->pcie_cap == 0) {
6971 dev_err(&bp->pdev->dev,
6972 "Cannot find PCI Express capability, aborting\n");
6974 goto err_out_release;
/* prefer 64-bit DMA (sets USING_DAC_FLAG); fall back to 32-bit */
6977 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
6978 bp->flags |= USING_DAC_FLAG;
6979 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
6980 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
6981 " failed, aborting\n");
6983 goto err_out_release;
6986 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6987 dev_err(&bp->pdev->dev,
6988 "System does not support DMA, aborting\n");
6990 goto err_out_release;
6993 dev->mem_start = pci_resource_start(pdev, 0);
6994 dev->base_addr = dev->mem_start;
6995 dev->mem_end = pci_resource_end(pdev, 0);
6997 dev->irq = pdev->irq;
6999 bp->regview = pci_ioremap_bar(pdev, 0);
7001 dev_err(&bp->pdev->dev,
7002 "Cannot map register space, aborting\n");
7004 goto err_out_release;
7007 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7008 min_t(u64, BNX2X_DB_SIZE,
7009 pci_resource_len(pdev, 2)));
7010 if (!bp->doorbells) {
7011 dev_err(&bp->pdev->dev,
7012 "Cannot map doorbell space, aborting\n");
7017 bnx2x_set_power_state(bp, PCI_D0);
7019 /* clean indirect addresses */
7020 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7021 PCICFG_VENDOR_ID_OFFSET);
7022 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7023 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7024 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7025 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7027 /* Reset the load counter */
7028 bnx2x_clear_load_cnt(bp);
7030 dev->watchdog_timeo = TX_TIMEOUT;
7032 dev->netdev_ops = &bnx2x_netdev_ops;
7033 bnx2x_set_ethtool_ops(dev);
7034 dev->features |= NETIF_F_SG;
7035 dev->features |= NETIF_F_HW_CSUM;
7036 if (bp->flags & USING_DAC_FLAG)
7037 dev->features |= NETIF_F_HIGHDMA;
7038 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7039 dev->features |= NETIF_F_TSO6;
7041 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7042 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7044 dev->vlan_features |= NETIF_F_SG;
7045 dev->vlan_features |= NETIF_F_HW_CSUM;
7046 if (bp->flags & USING_DAC_FLAG)
7047 dev->vlan_features |= NETIF_F_HIGHDMA;
7048 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7049 dev->vlan_features |= NETIF_F_TSO6;
7052 /* get_port_hwinfo() will set prtad and mmds properly */
7053 bp->mdio.prtad = MDIO_PRTAD_NONE;
7055 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7057 bp->mdio.mdio_read = bnx2x_mdio_read;
7058 bp->mdio.mdio_write = bnx2x_mdio_write;
/* error unwind: unmap, release regions, disable device */
7064 iounmap(bp->regview);
7067 if (bp->doorbells) {
7068 iounmap(bp->doorbells);
7069 bp->doorbells = NULL;
7073 if (atomic_read(&pdev->enable_cnt) == 1)
7074 pci_release_regions(pdev);
7077 pci_disable_device(pdev);
7078 pci_set_drvdata(pdev, NULL);
/*
 * Read the negotiated PCIe link width and speed from the chip's
 * PCICFG_LINK_CONTROL register (speed: 1 = 2.5GT/s, 2 = 5GT/s).
 */
7084 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7085 int *width, int *speed)
7087 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7089 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7091 /* return value of 1=2.5GHz 2=5GHz */
7092 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
/*
 * Validate a loaded firmware image before use: every section's
 * offset+length must lie inside the file, every init_ops offset must be
 * within the ops array, and the embedded version must match the version
 * this driver was built against.  Returns 0 if the image is acceptable.
 */
7095 static int bnx2x_check_firmware(struct bnx2x *bp)
7097 const struct firmware *firmware = bp->firmware;
7098 struct bnx2x_fw_file_hdr *fw_hdr;
7099 struct bnx2x_fw_file_section *sections;
7100 u32 offset, len, num_ops;
7105 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7108 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
/* the header is itself an array of {offset,len} section descriptors */
7109 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7111 /* Make sure none of the offsets and sizes make us read beyond
7112 * the end of the firmware data */
7113 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7114 offset = be32_to_cpu(sections[i].offset);
7115 len = be32_to_cpu(sections[i].len);
7116 if (offset + len > firmware->size) {
7117 dev_err(&bp->pdev->dev,
7118 "Section %d length is out of bounds\n", i);
7123 /* Likewise for the init_ops offsets */
7124 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7125 ops_offsets = (u16 *)(firmware->data + offset);
7126 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7128 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7129 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7130 dev_err(&bp->pdev->dev,
7131 "Section offset %d is out of bounds\n", i);
7136 /* Check FW version */
7137 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7138 fw_ver = firmware->data + offset;
7139 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7140 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7141 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7142 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7143 dev_err(&bp->pdev->dev,
7144 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7145 fw_ver[0], fw_ver[1], fw_ver[2],
7146 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7147 BCM_5710_FW_MINOR_VERSION,
7148 BCM_5710_FW_REVISION_VERSION,
7149 BCM_5710_FW_ENGINEERING_VERSION);
7156 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7158 const __be32 *source = (const __be32 *)_source;
7159 u32 *target = (u32 *)_target;
7162 for (i = 0; i < n/4; i++)
7163 target[i] = be32_to_cpu(source[i]);
7167 Ops array is stored in the following format:
7168 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7170 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7172 const __be32 *source = (const __be32 *)_source;
7173 struct raw_op *target = (struct raw_op *)_target;
7176 for (i = 0, j = 0; i < n/8; i++, j += 2) {
7177 tmp = be32_to_cpu(source[j]);
7178 target[i].op = (tmp >> 24) & 0xff;
7179 target[i].offset = tmp & 0xffffff;
7180 target[i].raw_data = be32_to_cpu(source[j + 1]);
7184 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7186 const __be16 *source = (const __be16 *)_source;
7187 u16 *target = (u16 *)_target;
7190 for (i = 0; i < n/2; i++)
7191 target[i] = be16_to_cpu(source[i]);
/*
 * Allocate bp->arr and fill it from the firmware section described by
 * fw_hdr->arr, converting endianness with 'func'.  On allocation failure
 * it presumably jumps to the error label 'lbl' - the goto is not visible
 * in this excerpt; verify against bnx2x_init_firmware()'s error labels.
 */
7194 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7196 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7197 bp->arr = kmalloc(len, GFP_KERNEL); \
7199 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7202 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7203 (u8 *)bp->arr, len); \
/*
 * Load and prepare the chip firmware: pick the file for the chip
 * revision (E1 vs E1H), request it from userspace, validate it, convert
 * the init data/ops/offsets sections to host order, and set up pointers
 * to the per-STORM interrupt tables and PRAM images.  Error paths free
 * whatever was allocated, in reverse order, via the labels at the end.
 */
7206 int bnx2x_init_firmware(struct bnx2x *bp)
7208 const char *fw_file_name;
7209 struct bnx2x_fw_file_hdr *fw_hdr;
7213 fw_file_name = FW_FILE_NAME_E1;
7214 else if (CHIP_IS_E1H(bp))
7215 fw_file_name = FW_FILE_NAME_E1H;
7217 BNX2X_ERR("Unsupported chip revision\n");
7221 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7223 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7225 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7226 goto request_firmware_exit;
7229 rc = bnx2x_check_firmware(bp);
7231 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7232 goto request_firmware_exit;
7235 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7237 /* Initialize the pointers to the init arrays */
7239 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7242 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7245 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7248 /* STORMs firmware */
7249 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7250 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7251 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7252 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7253 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7254 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7255 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7256 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7257 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7258 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7259 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7260 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7261 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7262 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7263 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7264 be32_to_cpu(fw_hdr->csem_pram_data.offset);
/* error unwind: free in reverse allocation order */
7268 init_offsets_alloc_err:
7269 kfree(bp->init_ops);
7271 kfree(bp->init_data);
7272 request_firmware_exit:
7273 release_firmware(bp->firmware);
/*
 * PCI probe: allocate the multiqueue net_device, run PCI/device init
 * (bnx2x_init_dev) and driver-state init (bnx2x_init_bp), register the
 * netdev and print a one-line summary.  Error paths unwind the mappings
 * and PCI state acquired by bnx2x_init_dev().
 */
7279 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7280 const struct pci_device_id *ent)
7282 struct net_device *dev = NULL;
7284 int pcie_width, pcie_speed;
7287 /* dev zeroed in init_etherdev */
7288 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7290 dev_err(&pdev->dev, "Cannot allocate net device\n");
7294 bp = netdev_priv(dev);
7295 bp->msg_enable = debug;
7297 pci_set_drvdata(pdev, dev);
7299 rc = bnx2x_init_dev(pdev, dev);
7305 rc = bnx2x_init_bp(bp);
7309 rc = register_netdev(dev);
7311 dev_err(&pdev->dev, "Cannot register net device\n");
7315 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7316 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7317 " IRQ %d, ", board_info[ent->driver_data].name,
7318 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7319 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7320 dev->base_addr, bp->pdev->irq);
7321 pr_cont("node addr %pM\n", dev->dev_addr);
/* error unwind */
7327 iounmap(bp->regview);
7330 iounmap(bp->doorbells);
7334 if (atomic_read(&pdev->enable_cnt) == 1)
7335 pci_release_regions(pdev);
7337 pci_disable_device(pdev);
7338 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove: unregister the netdev, cancel the pending reset work,
 * unmap the register/doorbell windows and release PCI resources.
 */
7343 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7345 struct net_device *dev = pci_get_drvdata(pdev);
7349 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7352 bp = netdev_priv(dev);
7354 unregister_netdev(dev);
7356 /* Make sure RESET task is not scheduled before continuing */
7357 cancel_delayed_work_sync(&bp->reset_task);
7360 iounmap(bp->regview);
7363 iounmap(bp->doorbells);
7367 if (atomic_read(&pdev->enable_cnt) == 1)
7368 pci_release_regions(pdev);
7370 pci_disable_device(pdev);
7371 pci_set_drvdata(pdev, NULL);
/*
 * Minimal NIC teardown used on a PCI (EEH) error: stop traffic and the
 * timer/stats, release IRQs, invalidate the E1 CAM table, and free SKBs,
 * SGE ranges and NAPI contexts.  No hardware access beyond what's shown -
 * the device may already be inaccessible.
 */
7374 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7378 bp->state = BNX2X_STATE_ERROR;
7380 bp->rx_mode = BNX2X_RX_MODE_NONE;
7382 bnx2x_netif_stop(bp, 0);
7383 netif_carrier_off(bp->dev);
7385 del_timer_sync(&bp->timer);
7386 bp->stats_state = STATS_STATE_DISABLED;
7387 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7390 bnx2x_free_irq(bp, false);
7392 if (CHIP_IS_E1(bp)) {
7393 struct mac_configuration_cmd *config =
7394 bnx2x_sp(bp, mcast_config);
7396 for (i = 0; i < config->hdr.length; i++)
7397 CAM_INVALIDATE(config->config_table[i]);
7400 /* Free SKBs, SGEs, TPA pool and driver internals */
7401 bnx2x_free_skbs(bp);
7402 for_each_queue(bp, i)
7403 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7404 for_each_queue(bp, i)
7405 netif_napi_del(&bnx2x_fp(bp, i, napi));
7408 bp->state = BNX2X_STATE_CLOSED;
/*
 * Re-establish the minimal MCP/shared-memory state after a PCI error
 * recovery: re-read the shmem base, sanity-check it (expected in the
 * 0xA0000-0xBFFFF window, else flag NO_MCP), verify the MCP validity
 * signature and refresh the firmware mailbox sequence number.
 */
7413 static void bnx2x_eeh_recover(struct bnx2x *bp)
7417 mutex_init(&bp->port.phy_mutex);
7419 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7420 bp->link_params.shmem_base = bp->common.shmem_base;
7421 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7423 if (!bp->common.shmem_base ||
7424 (bp->common.shmem_base < 0xA0000) ||
7425 (bp->common.shmem_base >= 0xC0000)) {
7426 BNX2X_DEV_INFO("MCP not active\n");
7427 bp->flags |= NO_MCP_FLAG;
7431 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7432 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7433 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7434 BNX2X_ERR("BAD MCP validity signature\n");
7436 if (!BP_NOMCP(bp)) {
7437 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7438 & DRV_MSG_SEQ_NUMBER_MASK);
7439 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7444 * bnx2x_io_error_detected - called when PCI error is detected
7445 * @pdev: Pointer to PCI device
7446 * @state: The current pci connection state
7448 * This function is called after a PCI bus error affecting
7449 * this device has been detected.
7451 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7452 pci_channel_state_t state)
7454 struct net_device *dev = pci_get_drvdata(pdev);
7455 struct bnx2x *bp = netdev_priv(dev);
/* detach first so the stack stops using the device */
7459 netif_device_detach(dev);
/* permanent failure: no point requesting a reset */
7461 if (state == pci_channel_io_perm_failure) {
7463 return PCI_ERS_RESULT_DISCONNECT;
7466 if (netif_running(dev))
7467 bnx2x_eeh_nic_unload(bp);
7469 pci_disable_device(pdev);
7473 /* Request a slot reset */
7474 return PCI_ERS_RESULT_NEED_RESET;
7478 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7479 * @pdev: Pointer to PCI device
7481 * Restart the card from scratch, as if from a cold-boot.
7483 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7485 struct net_device *dev = pci_get_drvdata(pdev);
7486 struct bnx2x *bp = netdev_priv(dev);
7490 if (pci_enable_device(pdev)) {
7492 "Cannot re-enable PCI device after reset\n");
7494 return PCI_ERS_RESULT_DISCONNECT;
/* restore bus mastering and the config space saved at probe time */
7497 pci_set_master(pdev);
7498 pci_restore_state(pdev);
7500 if (netif_running(dev))
7501 bnx2x_set_power_state(bp, PCI_D0);
7505 return PCI_ERS_RESULT_RECOVERED;
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't reload while a parity-error recovery is still in flight */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");

	/* Re-read shmem/MCP state that was lost across the reset */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	/* Re-enable netdev access that was detached in error_detected */
	netif_device_attach(dev);
/* PCI error-recovery (AER/EEH) callbacks wired into the PCI core */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
/* Top-level PCI driver description: probe/remove, power management
 * (suspend/resume) and the error-recovery handler above */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
/* Module init: create the single-threaded slow-path workqueue, then
 * register the PCI driver; the workqueue is torn down again when
 * registration fails. */
static int __init bnx2x_init(void)
	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");

	ret = pci_register_driver(&bnx2x_pci_driver);
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
/* Module exit: unregister the PCI driver (quiesces all devices) and
 * destroy the slow-path workqueue created in bnx2x_init() */
static void __exit bnx2x_cleanup(void)
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
/* Standard module entry/exit hooks */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
/* count denotes the number of new completions we have seen */
/* Credit @count completed SPQ slots back, then drain as many queued
 * CNIC kwqes as the max_kwqe_pending limit allows into the SPQ. */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	/* spq_lock serializes against the normal L2 slow-path producer */
	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	       bp->cnic_spq_pending++) {

		/* Nothing left on the local kwq staging ring */
		if (!bp->cnic_kwq_pending)

		/* Copy the next kwqe into the next free SPQ element */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Wrap the ring-buffer consumer pointer */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
			bp->cnic_kwq_cons++;

	/* Publish the new SPQ producer index to the chip */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
/* CNIC entry point (drv_submit_kwqes_16): stage up to @count 16-byte
 * kwqes on the local kwq ring, then kick bnx2x_cnic_sp_post() to move
 * them onto the hardware SPQ. */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		/* kwqe_16 entries are laid out like SPQ elements */
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Local staging ring full - stop accepting */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* Wrap the ring-buffer producer pointer */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
			bp->cnic_kwq_prod++;

	spin_unlock_bh(&bp->spq_lock);

	/* Kick the SPQ drain if there is room for more pending entries */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);
/* Deliver a control event to CNIC in process context (may sleep);
 * cnic_mutex protects cnic_ops against a concurrent unregister. */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
	struct cnic_ops *c_ops;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);
/* Deliver a control event to CNIC from BH (softirq) context: cnic_ops
 * is sampled via RCU instead of taking the sleeping mutex. */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
	struct cnic_ops *c_ops;

	c_ops = rcu_dereference(bp->cnic_ops);
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
	struct cnic_ctl_info ctl = {0};

	/* NOTE(review): ctl.cmd is presumably assigned from @cmd before the
	 * send - confirm against the full source */
	return bnx2x_cnic_ctl_send(bp, &ctl);
/* Report a CFC-delete completion for connection @cid to CNIC, then
 * credit one SPQ completion back via bnx2x_cnic_sp_post(). */
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
/* Control entry point exported to CNIC via cnic_eth_dev->drv_ctl;
 * dispatches on ctl->cmd. */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
	struct bnx2x *bp = netdev_priv(dev);

	/* Write one ILT (context table) entry on behalf of CNIC */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);

	/* CNIC reports how many SPQ completions it has consumed */
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);

	/* rtnl_lock is held. */
	/* Enable RX for the given L2 client and re-program rx mode */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);

	/* rtnl_lock is held. */
	/* Disable RX for the given L2 client and re-program rx mode */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);

		BNX2X_ERR("unknown command %x\n", ctl->cmd);
/* Fill in the interrupt and status-block information that CNIC uses:
 * irq_arr[0] is CNIC's own SB, irq_arr[1] the default SB. */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* CNIC gets MSI-X vector 1 (vector 0 is the default SB) */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;

	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
/* Called by the CNIC module (drv_register_cnic): allocate the kwqe
 * staging ring, set up CNIC's status block / IRQ info and iSCSI MAC,
 * and finally publish @ops for the send paths to use. */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* Refuse while interrupts are masked (device reset in progress) */
	if (atomic_read(&bp->intr_sem) != 0)

	/* One zeroed page backs the local kwqe staging ring */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish ops last so RCU readers only see fully set-up state */
	rcu_assign_pointer(bp->cnic_ops, ops);
/* Called by the CNIC module (drv_unregister_cnic): drop the iSCSI MAC,
 * unpublish cnic_ops and free the kwqe staging ring. */
static int bnx2x_unregister_cnic(struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	/* Remove the iSCSI MAC if we had programmed it at register time */
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);

	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* NOTE(review): no synchronize_rcu() is visible between clearing
	 * cnic_ops and freeing cnic_kwq - verify that BH-context readers
	 * (bnx2x_cnic_ctl_send_bh) cannot still be in flight here */
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;
/* Probe entry point used by the CNIC module to discover this device's
 * offload resources (I/O bases, context-table layout) and the driver
 * callbacks it should use. */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	/* Cap on kwqes outstanding on the SPQ at once */
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	/* CNIC's ILT range starts right after this function's L2 lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

EXPORT_SYMBOL(bnx2x_cnic_probe);
7865 #endif /* BCM_CNIC */