1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
60 #define DRV_MODULE_VERSION "1.52.1-8"
61 #define DRV_MODULE_RELDATE "2010/04/01"
62 #define BNX2X_BC_VER 0x040200
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
67 #define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT (5*HZ)
78 static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
108 static int dropless_fc;
109 module_param(dropless_fc, int, 0);
110 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
113 module_param(poll, int, 0);
114 MODULE_PARM_DESC(poll, " Use polling (for debug)");
116 static int mrrs = -1;
117 module_param(mrrs, int, 0);
118 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
121 module_param(debug, int, 0);
122 MODULE_PARM_DESC(debug, " Default debug msglevel");
124 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
126 static struct workqueue_struct *bnx2x_wq;
128 enum bnx2x_board_type {
134 /* indexed by board_type, above */
137 } board_info[] __devinitdata = {
138 { "Broadcom NetXtreme II BCM57710 XGb" },
139 { "Broadcom NetXtreme II BCM57711 XGb" },
140 { "Broadcom NetXtreme II BCM57711E XGb" }
144 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
151 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
153 /****************************************************************************
154 * General service functions
155 ****************************************************************************/
158 * locking is done by mcp
160 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165 PCICFG_VENDOR_ID_OFFSET);
168 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175 PCICFG_VENDOR_ID_OFFSET);
180 static const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
187 /* copy command into DMAE command memory and set DMAE command go */
188 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
194 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
198 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
201 REG_WR(bp, dmae_reg_go_c[idx], 1);
204 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
207 struct dmae_command dmae;
208 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
211 if (!bp->dmae_ready) {
212 u32 *data = bnx2x_sp(bp, wb_data[0]);
214 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
215 " using indirect\n", dst_addr, len32);
216 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
220 memset(&dmae, 0, sizeof(struct dmae_command));
222 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
226 DMAE_CMD_ENDIANITY_B_DW_SWAP |
228 DMAE_CMD_ENDIANITY_DW_SWAP |
230 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232 dmae.src_addr_lo = U64_LO(dma_addr);
233 dmae.src_addr_hi = U64_HI(dma_addr);
234 dmae.dst_addr_lo = dst_addr >> 2;
235 dmae.dst_addr_hi = 0;
237 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239 dmae.comp_val = DMAE_COMP_VAL;
241 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
243 "dst_addr [%x:%08x (%08x)]\n"
244 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
245 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
252 mutex_lock(&bp->dmae_mutex);
256 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
260 while (*wb_comp != DMAE_COMP_VAL) {
261 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
264 BNX2X_ERR("DMAE timeout!\n");
268 /* adjust delay for emulation/FPGA */
269 if (CHIP_REV_IS_SLOW(bp))
275 mutex_unlock(&bp->dmae_mutex);
278 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
280 struct dmae_command dmae;
281 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
284 if (!bp->dmae_ready) {
285 u32 *data = bnx2x_sp(bp, wb_data[0]);
288 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
289 " using indirect\n", src_addr, len32);
290 for (i = 0; i < len32; i++)
291 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
295 memset(&dmae, 0, sizeof(struct dmae_command));
297 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
301 DMAE_CMD_ENDIANITY_B_DW_SWAP |
303 DMAE_CMD_ENDIANITY_DW_SWAP |
305 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307 dmae.src_addr_lo = src_addr >> 2;
308 dmae.src_addr_hi = 0;
309 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
312 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314 dmae.comp_val = DMAE_COMP_VAL;
316 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
318 "dst_addr [%x:%08x (%08x)]\n"
319 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
320 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
324 mutex_lock(&bp->dmae_mutex);
326 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
329 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
333 while (*wb_comp != DMAE_COMP_VAL) {
336 BNX2X_ERR("DMAE timeout!\n");
340 /* adjust delay for emulation/FPGA */
341 if (CHIP_REV_IS_SLOW(bp))
346 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
350 mutex_unlock(&bp->dmae_mutex);
353 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
359 while (len > dmae_wr_max) {
360 bnx2x_write_dmae(bp, phys_addr + offset,
361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
369 /* used only for slowpath so not inlined */
370 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
380 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
384 REG_RD_DMAE(bp, reg, wb_data, 2);
386 return HILO_U64(wb_data[0], wb_data[1]);
390 static int bnx2x_mc_assert(struct bnx2x *bp)
394 u32 row0, row1, row2, row3;
397 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398 XSTORM_ASSERT_LIST_INDEX_OFFSET);
400 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
402 /* print the asserts */
403 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
405 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i));
407 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
414 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416 " 0x%08x 0x%08x 0x%08x\n",
417 i, row3, row2, row1, row0);
425 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426 TSTORM_ASSERT_LIST_INDEX_OFFSET);
428 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
430 /* print the asserts */
431 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
433 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i));
435 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
442 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444 " 0x%08x 0x%08x 0x%08x\n",
445 i, row3, row2, row1, row0);
453 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454 CSTORM_ASSERT_LIST_INDEX_OFFSET);
456 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
458 /* print the asserts */
459 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
461 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i));
463 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
470 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472 " 0x%08x 0x%08x 0x%08x\n",
473 i, row3, row2, row1, row0);
481 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482 USTORM_ASSERT_LIST_INDEX_OFFSET);
484 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
486 /* print the asserts */
487 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
489 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i));
491 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 4);
493 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 8);
495 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496 USTORM_ASSERT_LIST_OFFSET(i) + 12);
498 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500 " 0x%08x 0x%08x 0x%08x\n",
501 i, row3, row2, row1, row0);
511 static void bnx2x_fw_dump(struct bnx2x *bp)
519 BNX2X_ERR("NO MCP - can not dump\n");
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526 pr_err("begin fw dump (mark 0x%x)\n", mark);
529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536 for (word = 0; word < 8; word++)
537 data[word] = htonl(REG_RD(bp, offset + 4*word));
539 pr_cont("%s", (char *)data);
541 pr_err("end of fw dump\n");
544 static void bnx2x_panic_dump(struct bnx2x *bp)
549 bp->stats_state = STATS_STATE_DISABLED;
550 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
552 BNX2X_ERR("begin crash dump -----------------\n");
556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
558 " spq_prod_idx(0x%x)\n",
559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
563 for_each_queue(bp, i) {
564 struct bnx2x_fastpath *fp = &bp->fp[i];
566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
569 i, fp->rx_bd_prod, fp->rx_bd_cons,
570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574 fp->rx_sge_prod, fp->last_max_sge,
575 le16_to_cpu(fp->fp_u_idx),
576 fp->status_blk->u_status_block.status_block_index);
580 for_each_queue(bp, i) {
581 struct bnx2x_fastpath *fp = &bp->fp[i];
583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590 fp->status_blk->c_status_block.status_block_index,
591 fp->tx_db.data.prod);
596 for_each_queue(bp, i) {
597 struct bnx2x_fastpath *fp = &bp->fp[i];
599 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601 for (j = start; j != end; j = RX_BD(j + 1)) {
602 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
605 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
606 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
609 start = RX_SGE(fp->rx_sge_prod);
610 end = RX_SGE(fp->last_max_sge);
611 for (j = start; j != end; j = RX_SGE(j + 1)) {
612 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
615 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
616 i, j, rx_sge[1], rx_sge[0], sw_page->page);
619 start = RCQ_BD(fp->rx_comp_cons - 10);
620 end = RCQ_BD(fp->rx_comp_cons + 503);
621 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
624 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
630 for_each_queue(bp, i) {
631 struct bnx2x_fastpath *fp = &bp->fp[i];
633 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635 for (j = start; j != end; j = TX_BD(j + 1)) {
636 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
638 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639 i, j, sw_bd->skb, sw_bd->first_bd);
642 start = TX_BD(fp->tx_bd_cons - 10);
643 end = TX_BD(fp->tx_bd_cons + 254);
644 for (j = start; j != end; j = TX_BD(j + 1)) {
645 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
647 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
654 BNX2X_ERR("end crash dump -----------------\n");
657 static void bnx2x_int_enable(struct bnx2x *bp)
659 int port = BP_PORT(bp);
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661 u32 val = REG_RD(bp, addr);
662 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
666 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0);
668 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
671 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
676 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678 HC_CONFIG_0_REG_INT_LINE_EN_0 |
679 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
684 REG_WR(bp, addr, val);
686 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
689 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
690 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
692 REG_WR(bp, addr, val);
694 * Ensure that HC_CONFIG is written before leading/trailing edge config
699 if (CHIP_IS_E1H(bp)) {
700 /* init leading/trailing edge */
702 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
704 /* enable nig and gpio3 attention */
709 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
713 /* Make sure that interrupts are indeed enabled from here on */
717 static void bnx2x_int_disable(struct bnx2x *bp)
719 int port = BP_PORT(bp);
720 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721 u32 val = REG_RD(bp, addr);
723 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725 HC_CONFIG_0_REG_INT_LINE_EN_0 |
726 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
728 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
731 /* flush all outstanding writes */
734 REG_WR(bp, addr, val);
735 if (REG_RD(bp, addr) != val)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n");
739 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
744 /* disable interrupt handling */
745 atomic_inc(&bp->intr_sem);
746 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
749 /* prevent the HW from sending interrupts */
750 bnx2x_int_disable(bp);
752 /* make sure all ISRs are done */
754 synchronize_irq(bp->msix_table[0].vector);
759 for_each_queue(bp, i)
760 synchronize_irq(bp->msix_table[i + offset].vector);
762 synchronize_irq(bp->pdev->irq);
764 /* make sure sp_task is not running */
765 cancel_delayed_work(&bp->sp_task);
766 flush_workqueue(bnx2x_wq);
772 * General service functions
775 /* Return true if succeeded to acquire the lock */
776 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
809 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810 u8 storm, u16 index, u8 op, u8 update)
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
814 struct igu_ack_register igu_ack;
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
827 /* Make sure that ACK is written */
832 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
834 struct host_status_block *fpsb = fp->status_blk;
836 barrier(); /* status block is written to by the chip */
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
841 static u16 bnx2x_ack_int(struct bnx2x *bp)
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
855 * fast path service functions
858 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
860 /* Tell compiler that consumer and producer can change */
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
865 /* free skb in the packet ring at pos idx
866 * return idx of last bd freed
868 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
874 struct sk_buff *skb = tx_buf->skb;
875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891 #ifdef BNX2X_STOP_ON_ERROR
892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893 BNX2X_ERR("BAD nbd!\n");
897 new_cons = nbd + tx_buf->first_bd;
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
902 /* Skip a parse bd... */
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
926 tx_buf->first_bd = 0;
932 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
945 #ifdef BNX2X_STOP_ON_ERROR
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
951 return (s16)(fp->bp->tx_ring_size) - used;
954 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
958 /* Tell compiler that status block fields can change */
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
964 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
966 struct bnx2x *bp = fp->bp;
967 struct netdev_queue *txq;
968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
970 #ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
975 txq = netdev_get_tx_queue(bp->dev, fp->index);
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
979 while (sw_cons != hw_cons) {
982 pkt_cons = TX_BD(sw_cons);
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
987 hw_cons, sw_cons, pkt_cons);
989 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1009 /* TBD need a thresh? */
1010 if (unlikely(netif_tx_queue_stopped(txq))) {
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012 * while it's empty. This could have happen if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1021 __netif_tx_lock(txq, smp_processor_id());
1023 if ((netif_tx_queue_stopped(txq)) &&
1024 (bp->state == BNX2X_STATE_OPEN) &&
1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026 netif_tx_wake_queue(txq);
1028 __netif_tx_unlock(txq);
1034 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1037 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe)
1040 struct bnx2x *bp = fp->bp;
1041 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1045 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1046 fp->index, cid, command, bp->state,
1047 rr_cqe->ramrod_cqe.ramrod_type);
1052 switch (command | fp->state) {
1053 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054 BNX2X_FP_STATE_OPENING):
1055 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1057 fp->state = BNX2X_FP_STATE_OPEN;
1060 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1063 fp->state = BNX2X_FP_STATE_HALTED;
1067 BNX2X_ERR("unexpected MC reply (%d) "
1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
1076 switch (command | bp->state) {
1077 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079 bp->state = BNX2X_STATE_OPEN;
1082 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085 fp->state = BNX2X_FP_STATE_HALTED;
1088 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1094 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096 bnx2x_cnic_cfc_comp(bp, cid);
1100 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103 bp->set_mac_pending--;
1107 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109 bp->set_mac_pending--;
1114 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1115 command, bp->state);
1118 mb(); /* force bnx2x_wait_ramrod() to see the change */
1121 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1128 /* Skip "next page" elements */
1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1136 sw_buf->page = NULL;
1141 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1150 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1158 if (unlikely(page == NULL))
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1168 sw_buf->page = page;
1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1177 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1205 /* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1210 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
1226 *prod_bd = *cons_bd;
1229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1232 u16 last_max = fp->last_max_sge;
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1238 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1255 struct bnx2x *bp = fp->bp;
1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
1259 u16 last_max, last_elem, first_elem;
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1305 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1311 /* Clear the two last indices in the page to 1:
1312 these are the indices that correspond to the "next" element,
1313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1318 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
1348 #ifdef _ASM_GENERIC_INT_L64_H
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1353 fp->tpa_queue_used);
1357 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1362 struct sw_rx_page *rx_pg, old_rx_pg;
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1371 /* This is needed in order to enable forwarding support */
1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374 max(frag_size, (u32)len_on_bd));
1376 #ifdef BNX2X_STOP_ON_ERROR
1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394 rx_pg = &fp->rx_page_ring[sge_idx];
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1405 /* Unmap the page as we r going to pass it to the stack */
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1417 frag_size -= frag_len;
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1438 if (likely(new_skb)) {
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1450 prefetch(((char *)(skb)) + 128);
1452 #ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1462 skb_reserve(skb, pad);
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 iph = (struct iphdr *)skb->data;
1473 /* If there is no Rx VLAN offloading -
1474 take VLAN tag into an account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1492 napi_gro_receive(&fp->napi, skb);
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1504 /* else drop the packet and keep the buffer in the bin */
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
1507 fp->eth_q_stats.rx_skb_alloc_failed++;
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1518 struct ustorm_eth_rx_producers rx_prods = {0};
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539 ((u32 *)&rx_prods)[i]);
1541 mmiowb(); /* keep prod updates ordered */
1543 DP(NETIF_MSG_RX_STATUS,
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1550 struct bnx2x *bp = fp->bp;
1551 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1555 #ifdef BNX2X_STOP_ON_ERROR
1556 if (unlikely(bp->panic))
1560 /* CQ "next element" is of the size of the regular element,
1561 that's why it's ok here */
1562 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1566 bd_cons = fp->rx_bd_cons;
1567 bd_prod = fp->rx_bd_prod;
1568 bd_prod_fw = bd_prod;
1569 sw_comp_cons = fp->rx_comp_cons;
1570 sw_comp_prod = fp->rx_comp_prod;
1572 /* Memory barrier necessary as speculative reads of the rx
1573 * buffer can be ahead of the index in the status block
1577 DP(NETIF_MSG_RX_STATUS,
1578 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1579 fp->index, hw_comp_cons, sw_comp_cons);
1581 while (sw_comp_cons != hw_comp_cons) {
1582 struct sw_rx_bd *rx_buf = NULL;
1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe;
1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589 bd_prod = RX_BD(bd_prod);
1590 bd_cons = RX_BD(bd_cons);
1592 /* Prefetch the page containing the BD descriptor
1593 at producer's index. It will be needed when new skb is
1595 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596 (&fp->rx_desc_ring[bd_prod])) -
1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1602 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1603 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1604 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1605 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1606 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1607 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609 /* is this a slowpath msg? */
1610 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1611 bnx2x_sp_event(fp, cqe);
1614 /* this is an rx packet */
1616 rx_buf = &fp->rx_buf_ring[bd_cons];
1619 prefetch((u8 *)skb + 256);
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset;
1623 /* If CQE is marked both TPA_START and TPA_END
1624 it is a non-TPA CQE */
1625 if ((!fp->disable_tpa) &&
1626 (TPA_TYPE(cqe_fp_flags) !=
1627 (TPA_TYPE_START | TPA_TYPE_END))) {
1628 u16 queue = cqe->fast_path_cqe.queue_index;
1630 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631 DP(NETIF_MSG_RX_STATUS,
1632 "calling tpa_start on queue %d\n",
1635 bnx2x_tpa_start(fp, queue, skb,
1640 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641 DP(NETIF_MSG_RX_STATUS,
1642 "calling tpa_stop on queue %d\n",
1645 if (!BNX2X_RX_SUM_FIX(cqe))
1646 BNX2X_ERR("STOP on none TCP "
1649 /* This is a size of the linear data
1651 len = le16_to_cpu(cqe->fast_path_cqe.
1653 bnx2x_tpa_stop(bp, fp, queue, pad,
1654 len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1660 bnx2x_update_sge_prod(fp,
1661 &cqe->fast_path_cqe);
1666 dma_sync_single_for_device(&bp->pdev->dev,
1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH,
1671 prefetch(((char *)(skb)) + 128);
1673 /* is this an error packet? */
1674 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1675 DP(NETIF_MSG_RX_ERR,
1676 "ERROR flags %x rx packet %u\n",
1677 cqe_fp_flags, sw_comp_cons);
1678 fp->eth_q_stats.rx_err_discard_pkt++;
1682 /* Since we don't have a jumbo ring
1683 * copy small packets if mtu > 1500
1685 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1686 (len <= RX_COPY_THRESH)) {
1687 struct sk_buff *new_skb;
1689 new_skb = netdev_alloc_skb(bp->dev,
1691 if (new_skb == NULL) {
1692 DP(NETIF_MSG_RX_ERR,
1693 "ERROR packet dropped "
1694 "because of alloc failure\n");
1695 fp->eth_q_stats.rx_skb_alloc_failed++;
1700 skb_copy_from_linear_data_offset(skb, pad,
1701 new_skb->data + pad, len);
1702 skb_reserve(new_skb, pad);
1703 skb_put(new_skb, len);
1705 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1710 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1711 dma_unmap_single(&bp->pdev->dev,
1712 dma_unmap_addr(rx_buf, mapping),
1715 skb_reserve(skb, pad);
1719 DP(NETIF_MSG_RX_ERR,
1720 "ERROR packet dropped because "
1721 "of alloc failure\n");
1722 fp->eth_q_stats.rx_skb_alloc_failed++;
1724 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1728 skb->protocol = eth_type_trans(skb, bp->dev);
1730 skb->ip_summed = CHECKSUM_NONE;
1732 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1733 skb->ip_summed = CHECKSUM_UNNECESSARY;
1735 fp->eth_q_stats.hw_csum_err++;
1739 skb_record_rx_queue(skb, fp->index);
1742 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1743 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1744 PARSING_FLAGS_VLAN))
1745 vlan_gro_receive(&fp->napi, bp->vlgrp,
1746 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1749 napi_gro_receive(&fp->napi, skb);
1755 bd_cons = NEXT_RX_IDX(bd_cons);
1756 bd_prod = NEXT_RX_IDX(bd_prod);
1757 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1760 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1761 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1763 if (rx_pkt == budget)
1767 fp->rx_bd_cons = bd_cons;
1768 fp->rx_bd_prod = bd_prod_fw;
1769 fp->rx_comp_cons = sw_comp_cons;
1770 fp->rx_comp_prod = sw_comp_prod;
1772 /* Update producers */
1773 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1776 fp->rx_pkt += rx_pkt;
1782 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1784 struct bnx2x_fastpath *fp = fp_cookie;
1785 struct bnx2x *bp = fp->bp;
1787 /* Return here if interrupt is disabled */
1788 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1793 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1794 fp->index, fp->sb_id);
1795 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1797 #ifdef BNX2X_STOP_ON_ERROR
1798 if (unlikely(bp->panic))
1802 /* Handle Rx and Tx according to MSI-X vector */
1803 prefetch(fp->rx_cons_sb);
1804 prefetch(fp->tx_cons_sb);
1805 prefetch(&fp->status_blk->u_status_block.status_block_index);
1806 prefetch(&fp->status_blk->c_status_block.status_block_index);
1807 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1812 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1814 struct bnx2x *bp = netdev_priv(dev_instance);
1815 u16 status = bnx2x_ack_int(bp);
1819 /* Return here if interrupt is shared and it's not for us */
1820 if (unlikely(status == 0)) {
1821 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1824 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1826 /* Return here if interrupt is disabled */
1827 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1832 #ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1837 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838 struct bnx2x_fastpath *fp = &bp->fp[i];
1840 mask = 0x2 << fp->sb_id;
1841 if (status & mask) {
1842 /* Handle Rx and Tx according to SB id */
1843 prefetch(fp->rx_cons_sb);
1844 prefetch(&fp->status_blk->u_status_block.
1845 status_block_index);
1846 prefetch(fp->tx_cons_sb);
1847 prefetch(&fp->status_blk->c_status_block.
1848 status_block_index);
1849 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1855 mask = 0x2 << CNIC_SB_ID(bp);
1856 if (status & (mask | 0x1)) {
1857 struct cnic_ops *c_ops = NULL;
1860 c_ops = rcu_dereference(bp->cnic_ops);
1862 c_ops->cnic_handler(bp->cnic_data, NULL);
1869 if (unlikely(status & 0x1)) {
1870 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1877 if (unlikely(status))
1878 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1884 /* end of fast path */
1886 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1891 * General service functions
1894 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1897 u32 resource_bit = (1 << resource);
1898 int func = BP_FUNC(bp);
1899 u32 hw_lock_control_reg;
1902 /* Validating that the resource is within range */
1903 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1905 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1911 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1913 hw_lock_control_reg =
1914 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1917 /* Validating that the resource is not already taken */
1918 lock_status = REG_RD(bp, hw_lock_control_reg);
1919 if (lock_status & resource_bit) {
1920 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1921 lock_status, resource_bit);
1925 /* Try for 5 second every 5ms */
1926 for (cnt = 0; cnt < 1000; cnt++) {
1927 /* Try to acquire the lock */
1928 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1929 lock_status = REG_RD(bp, hw_lock_control_reg);
1930 if (lock_status & resource_bit)
1935 DP(NETIF_MSG_HW, "Timeout\n");
1939 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1942 u32 resource_bit = (1 << resource);
1943 int func = BP_FUNC(bp);
1944 u32 hw_lock_control_reg;
1946 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1948 /* Validating that the resource is within range */
1949 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1951 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1957 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1959 hw_lock_control_reg =
1960 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1963 /* Validating that the resource is currently taken */
1964 lock_status = REG_RD(bp, hw_lock_control_reg);
1965 if (!(lock_status & resource_bit)) {
1966 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1967 lock_status, resource_bit);
1971 REG_WR(bp, hw_lock_control_reg, resource_bit);
1975 /* HW Lock for shared dual port PHYs */
1976 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1978 mutex_lock(&bp->port.phy_mutex);
1980 if (bp->port.need_hw_lock)
1981 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1984 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1986 if (bp->port.need_hw_lock)
1987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1989 mutex_unlock(&bp->port.phy_mutex);
1992 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1994 /* The GPIO should be swapped if swap register is set and active */
1995 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997 int gpio_shift = gpio_num +
1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999 u32 gpio_mask = (1 << gpio_shift);
2003 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2008 /* read GPIO value */
2009 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2011 /* get the requested pin value */
2012 if ((gpio_reg & gpio_mask) == gpio_mask)
2017 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2022 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2024 /* The GPIO should be swapped if swap register is set and active */
2025 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2026 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2027 int gpio_shift = gpio_num +
2028 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029 u32 gpio_mask = (1 << gpio_shift);
2032 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2037 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2038 /* read GPIO and mask except the float bits */
2039 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2042 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044 gpio_num, gpio_shift);
2045 /* clear FLOAT and set CLR */
2046 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2050 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052 gpio_num, gpio_shift);
2053 /* clear FLOAT and set SET */
2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2058 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2059 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060 gpio_num, gpio_shift);
2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2069 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2075 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2077 /* The GPIO should be swapped if swap register is set and active */
2078 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080 int gpio_shift = gpio_num +
2081 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082 u32 gpio_mask = (1 << gpio_shift);
2085 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2090 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2092 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2095 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097 "output low\n", gpio_num, gpio_shift);
2098 /* clear SET and set CLR */
2099 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2103 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105 "output high\n", gpio_num, gpio_shift);
2106 /* clear CLR and set SET */
2107 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2115 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2121 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2123 u32 spio_mask = (1 << spio_num);
2126 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127 (spio_num > MISC_REGISTERS_SPIO_7)) {
2128 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2132 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133 /* read SPIO and mask except the float bits */
2134 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2137 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2138 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139 /* clear FLOAT and set CLR */
2140 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2144 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146 /* clear FLOAT and set SET */
2147 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2151 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2154 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2161 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2167 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2169 switch (bp->link_vars.ieee_fc &
2170 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2171 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2172 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2177 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2182 bp->port.advertising |= ADVERTISED_Asym_Pause;
2186 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192 static void bnx2x_link_report(struct bnx2x *bp)
2194 if (bp->flags & MF_FUNC_DIS) {
2195 netif_carrier_off(bp->dev);
2196 netdev_err(bp->dev, "NIC Link is Down\n");
2200 if (bp->link_vars.link_up) {
2203 if (bp->state == BNX2X_STATE_OPEN)
2204 netif_carrier_on(bp->dev);
2205 netdev_info(bp->dev, "NIC Link is Up, ");
2207 line_speed = bp->link_vars.line_speed;
2212 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214 if (vn_max_rate < line_speed)
2215 line_speed = vn_max_rate;
2217 pr_cont("%d Mbps ", line_speed);
2219 if (bp->link_vars.duplex == DUPLEX_FULL)
2220 pr_cont("full duplex");
2222 pr_cont("half duplex");
2224 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2226 pr_cont(", receive ");
2227 if (bp->link_vars.flow_ctrl &
2229 pr_cont("& transmit ");
2231 pr_cont(", transmit ");
2233 pr_cont("flow control ON");
2237 } else { /* link_down */
2238 netif_carrier_off(bp->dev);
2239 netdev_err(bp->dev, "NIC Link is Down\n");
2243 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2245 if (!BP_NOMCP(bp)) {
2248 /* Initialize link parameters structure variables */
2249 /* It is recommended to turn off RX FC for jumbo frames
2250 for better performance */
2251 if (bp->dev->mtu > 5000)
2252 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2254 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2256 bnx2x_acquire_phy_lock(bp);
2258 if (load_mode == LOAD_DIAG)
2259 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2261 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2263 bnx2x_release_phy_lock(bp);
2265 bnx2x_calc_fc_adv(bp);
2267 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2269 bnx2x_link_report(bp);
2274 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2278 static void bnx2x_link_set(struct bnx2x *bp)
2280 if (!BP_NOMCP(bp)) {
2281 bnx2x_acquire_phy_lock(bp);
2282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2283 bnx2x_release_phy_lock(bp);
2285 bnx2x_calc_fc_adv(bp);
2287 BNX2X_ERR("Bootcode is missing - can not set link\n");
2290 static void bnx2x__link_reset(struct bnx2x *bp)
2292 if (!BP_NOMCP(bp)) {
2293 bnx2x_acquire_phy_lock(bp);
2294 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2295 bnx2x_release_phy_lock(bp);
2297 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2300 static u8 bnx2x_link_test(struct bnx2x *bp)
2304 if (!BP_NOMCP(bp)) {
2305 bnx2x_acquire_phy_lock(bp);
2306 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307 bnx2x_release_phy_lock(bp);
2309 BNX2X_ERR("Bootcode is missing - can not test link\n");
2314 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2316 u32 r_param = bp->link_vars.line_speed / 8;
2317 u32 fair_periodic_timeout_usec;
2320 memset(&(bp->cmng.rs_vars), 0,
2321 sizeof(struct rate_shaping_vars_per_port));
2322 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2324 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2327 /* this is the threshold below which no timer arming will occur
2328 1.25 coefficient is for the threshold to be a little bigger
2329 than the real time, to compensate for timer in-accuracy */
2330 bp->cmng.rs_vars.rs_threshold =
2331 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2333 /* resolution of fairness timer */
2334 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2338 /* this is the threshold below which we won't arm the timer anymore */
2339 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2341 /* we multiply by 1e3/8 to get bytes/msec.
2342 We don't want the credits to pass a credit
2343 of the t_fair*FAIR_MEM (algorithm resolution) */
2344 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345 /* since each tick is 4 usec */
2346 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2349 /* Calculates the sum of vn_min_rates.
2350 It's needed for further normalizing of the min_rates.
2352 sum of vn_min_rates.
2354 0 - if all the min_rates are 0.
2355 In the later case fainess algorithm should be deactivated.
2356 If not all min_rates are zero then those that are zeroes will be set to 1.
2358 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2361 int port = BP_PORT(bp);
2364 bp->vn_weight_sum = 0;
2365 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366 int func = 2*vn + port;
2367 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2375 /* If min rate is zero - set it to 1 */
2377 vn_min_rate = DEF_MIN_RATE;
2381 bp->vn_weight_sum += vn_min_rate;
2384 /* ... only if all min rates are zeros - disable fairness */
2386 bp->cmng.flags.cmng_enables &=
2387 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2389 " fairness will be disabled\n");
2391 bp->cmng.flags.cmng_enables |=
2392 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2395 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2397 struct rate_shaping_vars_per_vn m_rs_vn;
2398 struct fairness_vars_per_vn m_fair_vn;
2399 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400 u16 vn_min_rate, vn_max_rate;
2403 /* If function is hidden - set min and max to zeroes */
2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2409 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2411 /* If min rate is zero - set it to 1 */
2413 vn_min_rate = DEF_MIN_RATE;
2414 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2418 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2419 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2421 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2424 /* global vn counter - maximal Mbps for this vn */
2425 m_rs_vn.vn_counter.rate = vn_max_rate;
2427 /* quota - number of bytes transmitted in this period */
2428 m_rs_vn.vn_counter.quota =
2429 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2431 if (bp->vn_weight_sum) {
2432 /* credit for each period of the fairness algorithm:
2433 number of bytes in T_FAIR (the vn share the port rate).
2434 vn_weight_sum should not be larger than 10000, thus
2435 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2437 m_fair_vn.vn_credit_delta =
2438 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439 (8 * bp->vn_weight_sum))),
2440 (bp->cmng.fair_vars.fair_threshold * 2));
2441 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2442 m_fair_vn.vn_credit_delta);
2445 /* Store it to internal memory */
2446 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449 ((u32 *)(&m_rs_vn))[i]);
2451 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_fair_vn))[i]);
2458 /* This function is called upon link interrupt */
2459 static void bnx2x_link_attn(struct bnx2x *bp)
2461 /* Make sure that we are synced with the current statistics */
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2464 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2466 if (bp->link_vars.link_up) {
2468 /* dropless flow control */
2469 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2470 int port = BP_PORT(bp);
2471 u32 pause_enabled = 0;
2473 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2476 REG_WR(bp, BAR_USTRORM_INTMEM +
2477 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2481 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2482 struct host_port_stats *pstats;
2484 pstats = bnx2x_sp(bp, port_stats);
2485 /* reset old bmac stats */
2486 memset(&(pstats->mac_stx[0]), 0,
2487 sizeof(struct mac_stx));
2489 if (bp->state == BNX2X_STATE_OPEN)
2490 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2493 /* indicate link status */
2494 bnx2x_link_report(bp);
2497 int port = BP_PORT(bp);
2501 /* Set the attention towards other drivers on the same port */
2502 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2503 if (vn == BP_E1HVN(bp))
2506 func = ((vn << 1) | port);
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2508 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2511 if (bp->link_vars.link_up) {
2514 /* Init rate shaping and fairness contexts */
2515 bnx2x_init_port_minmax(bp);
2517 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2518 bnx2x_init_vn_minmax(bp, 2*vn + port);
2520 /* Store it to internal memory */
2522 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2523 REG_WR(bp, BAR_XSTRORM_INTMEM +
2524 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2525 ((u32 *)(&bp->cmng))[i]);
2530 static void bnx2x__link_status_update(struct bnx2x *bp)
2532 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2535 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2537 if (bp->link_vars.link_up)
2538 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2540 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2542 bnx2x_calc_vn_weight_sum(bp);
2544 /* indicate link status */
2545 bnx2x_link_report(bp);
2548 static void bnx2x_pmf_update(struct bnx2x *bp)
2550 int port = BP_PORT(bp);
2554 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2556 /* enable nig attention */
2557 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2558 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2559 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2561 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2569 * General service functions
2572 /* send the MCP a request, block until there is a reply */
2573 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2575 int func = BP_FUNC(bp);
2576 u32 seq = ++bp->fw_seq;
2579 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2581 mutex_lock(&bp->fw_mb_mutex);
2582 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2583 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2586 /* let the FW do it's magic ... */
2589 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2591 /* Give the FW up to 5 second (500*10ms) */
2592 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2594 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2595 cnt*delay, rc, seq);
2597 /* is this a reply to our command? */
2598 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2599 rc &= FW_MSG_CODE_MASK;
2602 BNX2X_ERR("FW failed to respond!\n");
2606 mutex_unlock(&bp->fw_mb_mutex);
2611 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2612 static void bnx2x_set_rx_mode(struct net_device *dev);
2614 static void bnx2x_e1h_disable(struct bnx2x *bp)
2616 int port = BP_PORT(bp);
2618 netif_tx_disable(bp->dev);
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2622 netif_carrier_off(bp->dev);
2625 static void bnx2x_e1h_enable(struct bnx2x *bp)
2627 int port = BP_PORT(bp);
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2631 /* Tx queue should be only reenabled */
2632 netif_tx_wake_all_queues(bp->dev);
2635 * Should not call netif_carrier_on since it will be called if the link
2636 * is up when checking for link state
2640 static void bnx2x_update_min_max(struct bnx2x *bp)
2642 int port = BP_PORT(bp);
2645 /* Init rate shaping and fairness contexts */
2646 bnx2x_init_port_minmax(bp);
2648 bnx2x_calc_vn_weight_sum(bp);
2650 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2651 bnx2x_init_vn_minmax(bp, 2*vn + port);
2656 /* Set the attention towards other drivers on the same port */
2657 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2658 if (vn == BP_E1HVN(bp))
2661 func = ((vn << 1) | port);
2662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2663 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2666 /* Store it to internal memory */
2667 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2668 REG_WR(bp, BAR_XSTRORM_INTMEM +
2669 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2670 ((u32 *)(&bp->cmng))[i]);
2674 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2676 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2678 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2681 * This is the only place besides the function initialization
2682 * where the bp->flags can change so it is done without any
2685 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2686 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2687 bp->flags |= MF_FUNC_DIS;
2689 bnx2x_e1h_disable(bp);
2691 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2692 bp->flags &= ~MF_FUNC_DIS;
2694 bnx2x_e1h_enable(bp);
2696 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2698 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2700 bnx2x_update_min_max(bp);
2701 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2704 /* Report results to MCP */
2706 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2711 /* must be called under the spq lock */
2712 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2714 struct eth_spe *next_spe = bp->spq_prod_bd;
2716 if (bp->spq_prod_bd == bp->spq_last_bd) {
2717 bp->spq_prod_bd = bp->spq;
2718 bp->spq_prod_idx = 0;
2719 DP(NETIF_MSG_TIMER, "end of spq\n");
2727 /* must be called under the spq lock */
2728 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2730 int func = BP_FUNC(bp);
2732 /* Make sure that BD data is updated before writing the producer */
2735 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2740 /* the slow path queue is odd since completions arrive on the fastpath ring */
2741 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2742 u32 data_hi, u32 data_lo, int common)
2744 struct eth_spe *spe;
2746 #ifdef BNX2X_STOP_ON_ERROR
2747 if (unlikely(bp->panic))
2751 spin_lock_bh(&bp->spq_lock);
2753 if (!bp->spq_left) {
2754 BNX2X_ERR("BUG! SPQ ring full!\n");
2755 spin_unlock_bh(&bp->spq_lock);
2760 spe = bnx2x_sp_get_next(bp);
2762 /* CID needs port number to be encoded int it */
2763 spe->hdr.conn_and_cmd_data =
2764 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2766 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2769 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2771 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2772 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2776 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2778 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2779 (u32)(U64_LO(bp->spq_mapping) +
2780 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2781 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2783 bnx2x_sp_prod_update(bp);
2784 spin_unlock_bh(&bp->spq_lock);
2788 /* acquire split MCP access lock register */
2789 static int bnx2x_acquire_alr(struct bnx2x *bp)
2795 for (j = 0; j < 1000; j++) {
2797 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799 if (val & (1L << 31))
2804 if (!(val & (1L << 31))) {
2805 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2812 /* release split MCP access lock register */
2813 static void bnx2x_release_alr(struct bnx2x *bp)
2815 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2818 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2820 struct host_def_status_block *def_sb = bp->def_status_blk;
2823 barrier(); /* status block is written to by the chip */
2824 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2825 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2828 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2829 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2832 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2833 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2836 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2837 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2840 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2841 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2848 * slow path service functions
2851 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2853 int port = BP_PORT(bp);
2854 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2855 COMMAND_REG_ATTN_BITS_SET);
2856 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2857 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2858 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2859 NIG_REG_MASK_INTERRUPT_PORT0;
2863 if (bp->attn_state & asserted)
2864 BNX2X_ERR("IGU ERROR\n");
2866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867 aeu_mask = REG_RD(bp, aeu_addr);
2869 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2870 aeu_mask, asserted);
2871 aeu_mask &= ~(asserted & 0x3ff);
2872 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2874 REG_WR(bp, aeu_addr, aeu_mask);
2875 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2877 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2878 bp->attn_state |= asserted;
2879 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2881 if (asserted & ATTN_HARD_WIRED_MASK) {
2882 if (asserted & ATTN_NIG_FOR_FUNC) {
2884 bnx2x_acquire_phy_lock(bp);
2886 /* save nig interrupt mask */
2887 nig_mask = REG_RD(bp, nig_int_mask_addr);
2888 REG_WR(bp, nig_int_mask_addr, 0);
2890 bnx2x_link_attn(bp);
2892 /* handle unicore attn? */
2894 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2897 if (asserted & GPIO_2_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2900 if (asserted & GPIO_3_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2903 if (asserted & GPIO_4_FUNC)
2904 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2907 if (asserted & ATTN_GENERAL_ATTN_1) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2911 if (asserted & ATTN_GENERAL_ATTN_2) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2915 if (asserted & ATTN_GENERAL_ATTN_3) {
2916 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2920 if (asserted & ATTN_GENERAL_ATTN_4) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2924 if (asserted & ATTN_GENERAL_ATTN_5) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2928 if (asserted & ATTN_GENERAL_ATTN_6) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2934 } /* if hardwired */
2936 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2938 REG_WR(bp, hc_addr, asserted);
2940 /* now set back the mask */
2941 if (asserted & ATTN_NIG_FOR_FUNC) {
2942 REG_WR(bp, nig_int_mask_addr, nig_mask);
2943 bnx2x_release_phy_lock(bp);
2947 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2949 int port = BP_PORT(bp);
2951 /* mark the failure */
2952 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2954 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2955 bp->link_params.ext_phy_config);
2957 /* log the failure */
2958 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959 " the driver to shutdown the card to prevent permanent"
2960 " damage. Please contact OEM Support for assistance\n");
2963 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2965 int port = BP_PORT(bp);
2967 u32 val, swap_val, swap_override;
2969 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2972 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2974 val = REG_RD(bp, reg_offset);
2975 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976 REG_WR(bp, reg_offset, val);
2978 BNX2X_ERR("SPIO5 hw attention\n");
2980 /* Fan failure attention */
2981 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2983 /* Low power mode is controlled by GPIO 2 */
2984 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2985 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2986 /* The PHY reset is controlled by GPIO 1 */
2987 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2992 /* The PHY reset is controlled by GPIO 1 */
2993 /* fake the port number to cancel the swap done in
2995 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2996 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2997 port = (swap_val && swap_override) ^ 1;
2998 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2999 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3005 bnx2x_fan_failure(bp);
3008 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010 bnx2x_acquire_phy_lock(bp);
3011 bnx2x_handle_module_detect_int(&bp->link_params);
3012 bnx2x_release_phy_lock(bp);
3015 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3017 val = REG_RD(bp, reg_offset);
3018 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019 REG_WR(bp, reg_offset, val);
3021 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3022 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3027 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3031 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3033 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035 /* DORQ discard attention */
3037 BNX2X_ERR("FATAL error from DORQ\n");
3040 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3042 int port = BP_PORT(bp);
3045 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3048 val = REG_RD(bp, reg_offset);
3049 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050 REG_WR(bp, reg_offset, val);
3052 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3053 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3058 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3062 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3064 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066 /* CFC error attention */
3068 BNX2X_ERR("FATAL error from CFC\n");
3071 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3073 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075 /* RQ_USDMDP_FIFO_OVERFLOW */
3077 BNX2X_ERR("FATAL error from PXP\n");
3080 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3082 int port = BP_PORT(bp);
3085 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3088 val = REG_RD(bp, reg_offset);
3089 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090 REG_WR(bp, reg_offset, val);
3092 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3093 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3098 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3102 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3104 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105 int func = BP_FUNC(bp);
3107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3108 bp->mf_config = SHMEM_RD(bp,
3109 mf_cfg.func_mf_config[func].config);
3110 val = SHMEM_RD(bp, func_mb[func].drv_status);
3111 if (val & DRV_STATUS_DCC_EVENT_MASK)
3113 (val & DRV_STATUS_DCC_EVENT_MASK));
3114 bnx2x__link_status_update(bp);
3115 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3116 bnx2x_pmf_update(bp);
3118 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3120 BNX2X_ERR("MC assert!\n");
3121 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3122 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3127 } else if (attn & BNX2X_MCP_ASSERT) {
3129 BNX2X_ERR("MCP assert!\n");
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3134 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3137 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3138 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3139 if (attn & BNX2X_GRC_TIMEOUT) {
3140 val = CHIP_IS_E1H(bp) ?
3141 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3142 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3144 if (attn & BNX2X_GRC_RSV) {
3145 val = CHIP_IS_E1H(bp) ?
3146 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3147 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3149 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3153 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3154 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3157 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3158 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3159 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3160 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3161 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3162 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3164 * should be run under rtnl lock
3166 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3176 * should be run under rtnl lock
3178 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3182 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3188 * should be run under rtnl lock
3190 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3192 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3193 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3194 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3198 * should be run under rtnl lock
3200 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3202 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3204 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3206 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3207 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3213 * should be run under rtnl lock
3215 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3217 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3219 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3221 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3222 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230 * should be run under rtnl lock
3232 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3234 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3237 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3239 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3243 static inline void _print_next_block(int idx, const char *blk)
3250 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3254 for (i = 0; sig; i++) {
3255 cur_bit = ((u32)0x1 << i);
3256 if (sig & cur_bit) {
3258 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3259 _print_next_block(par_num++, "BRB");
3261 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3262 _print_next_block(par_num++, "PARSER");
3264 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3265 _print_next_block(par_num++, "TSDM");
3267 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3268 _print_next_block(par_num++, "SEARCHER");
3270 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3271 _print_next_block(par_num++, "TSEMI");
3283 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3287 for (i = 0; sig; i++) {
3288 cur_bit = ((u32)0x1 << i);
3289 if (sig & cur_bit) {
3291 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3292 _print_next_block(par_num++, "PBCLIENT");
3294 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3295 _print_next_block(par_num++, "QM");
3297 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3298 _print_next_block(par_num++, "XSDM");
3300 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3301 _print_next_block(par_num++, "XSEMI");
3303 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3304 _print_next_block(par_num++, "DOORBELLQ");
3306 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3307 _print_next_block(par_num++, "VAUX PCI CORE");
3309 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3310 _print_next_block(par_num++, "DEBUG");
3312 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3313 _print_next_block(par_num++, "USDM");
3315 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3316 _print_next_block(par_num++, "USEMI");
3318 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3319 _print_next_block(par_num++, "UPB");
3321 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3322 _print_next_block(par_num++, "CSDM");
3334 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3338 for (i = 0; sig; i++) {
3339 cur_bit = ((u32)0x1 << i);
3340 if (sig & cur_bit) {
3342 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSEMI");
3345 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3346 _print_next_block(par_num++, "PXP");
3348 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3349 _print_next_block(par_num++,
3350 "PXPPCICLOCKCLIENT");
3352 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3353 _print_next_block(par_num++, "CFC");
3355 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3356 _print_next_block(par_num++, "CDU");
3358 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3359 _print_next_block(par_num++, "IGU");
3361 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3362 _print_next_block(par_num++, "MISC");
3374 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3378 for (i = 0; sig; i++) {
3379 cur_bit = ((u32)0x1 << i);
3380 if (sig & cur_bit) {
3382 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3383 _print_next_block(par_num++, "MCP ROM");
3385 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3386 _print_next_block(par_num++, "MCP UMP RX");
3388 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3389 _print_next_block(par_num++, "MCP UMP TX");
3391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3392 _print_next_block(par_num++, "MCP SCPAD");
3404 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3407 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3408 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3410 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3411 "[0]:0x%08x [1]:0x%08x "
3412 "[2]:0x%08x [3]:0x%08x\n",
3413 sig0 & HW_PRTY_ASSERT_SET_0,
3414 sig1 & HW_PRTY_ASSERT_SET_1,
3415 sig2 & HW_PRTY_ASSERT_SET_2,
3416 sig3 & HW_PRTY_ASSERT_SET_3);
3417 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3419 par_num = bnx2x_print_blocks_with_parity0(
3420 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3421 par_num = bnx2x_print_blocks_with_parity1(
3422 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3423 par_num = bnx2x_print_blocks_with_parity2(
3424 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3425 par_num = bnx2x_print_blocks_with_parity3(
3426 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3433 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3435 struct attn_route attn;
3436 int port = BP_PORT(bp);
3438 attn.sig[0] = REG_RD(bp,
3439 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3441 attn.sig[1] = REG_RD(bp,
3442 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3444 attn.sig[2] = REG_RD(bp,
3445 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3447 attn.sig[3] = REG_RD(bp,
3448 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3451 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3455 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3457 struct attn_route attn, *group_mask;
3458 int port = BP_PORT(bp);
3464 /* need to take HW lock because MCP or other port might also
3465 try to handle this event */
3466 bnx2x_acquire_alr(bp);
3468 if (bnx2x_chk_parity_attn(bp)) {
3469 bp->recovery_state = BNX2X_RECOVERY_INIT;
3470 bnx2x_set_reset_in_progress(bp);
3471 schedule_delayed_work(&bp->reset_task, 0);
3472 /* Disable HW interrupts */
3473 bnx2x_int_disable(bp);
3474 bnx2x_release_alr(bp);
3475 /* In case of parity errors don't handle attentions so that
3476 * other function would "see" parity errors.
3481 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3482 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3483 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3484 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3485 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3486 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3488 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3489 if (deasserted & (1 << index)) {
3490 group_mask = &bp->attn_group[index];
3492 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3493 index, group_mask->sig[0], group_mask->sig[1],
3494 group_mask->sig[2], group_mask->sig[3]);
3496 bnx2x_attn_int_deasserted3(bp,
3497 attn.sig[3] & group_mask->sig[3]);
3498 bnx2x_attn_int_deasserted1(bp,
3499 attn.sig[1] & group_mask->sig[1]);
3500 bnx2x_attn_int_deasserted2(bp,
3501 attn.sig[2] & group_mask->sig[2]);
3502 bnx2x_attn_int_deasserted0(bp,
3503 attn.sig[0] & group_mask->sig[0]);
3507 bnx2x_release_alr(bp);
3509 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3512 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3514 REG_WR(bp, reg_addr, val);
3516 if (~bp->attn_state & deasserted)
3517 BNX2X_ERR("IGU ERROR\n");
3519 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3520 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3522 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523 aeu_mask = REG_RD(bp, reg_addr);
3525 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3526 aeu_mask, deasserted);
3527 aeu_mask |= (deasserted & 0x3ff);
3528 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3530 REG_WR(bp, reg_addr, aeu_mask);
3531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3533 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3534 bp->attn_state &= ~deasserted;
3535 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3538 static void bnx2x_attn_int(struct bnx2x *bp)
3540 /* read local copy of bits */
3541 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3543 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3545 u32 attn_state = bp->attn_state;
3547 /* look for changed bits */
3548 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3549 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3552 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3553 attn_bits, attn_ack, asserted, deasserted);
3555 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3556 BNX2X_ERR("BAD attention state\n");
3558 /* handle bits that were raised */
3560 bnx2x_attn_int_asserted(bp, asserted);
3563 bnx2x_attn_int_deasserted(bp, deasserted);
3566 static void bnx2x_sp_task(struct work_struct *work)
3568 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3571 /* Return here if interrupt is disabled */
3572 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3573 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3577 status = bnx2x_update_dsb_idx(bp);
3578 /* if (status == 0) */
3579 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3581 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3589 /* CStorm events: STAT_QUERY */
3591 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3595 if (unlikely(status))
3596 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3599 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3601 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3603 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3605 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3607 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3611 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3613 struct net_device *dev = dev_instance;
3614 struct bnx2x *bp = netdev_priv(dev);
3616 /* Return here if interrupt is disabled */
3617 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3618 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3622 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3624 #ifdef BNX2X_STOP_ON_ERROR
3625 if (unlikely(bp->panic))
3631 struct cnic_ops *c_ops;
3634 c_ops = rcu_dereference(bp->cnic_ops);
3636 c_ops->cnic_handler(bp->cnic_data, NULL);
3640 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3645 /* end of slow path */
3649 /****************************************************************************
3651 ****************************************************************************/
3653 /* sum[hi:lo] += add[hi:lo] */
3654 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3657 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3660 /* difference = minuend - subtrahend */
3661 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3663 if (m_lo < s_lo) { \
3665 d_hi = m_hi - s_hi; \
3667 /* we can 'loan' 1 */ \
3669 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3671 /* m_hi <= s_hi */ \
3676 /* m_lo >= s_lo */ \
3677 if (m_hi < s_hi) { \
3681 /* m_hi >= s_hi */ \
3682 d_hi = m_hi - s_hi; \
3683 d_lo = m_lo - s_lo; \
3688 #define UPDATE_STAT64(s, t) \
3690 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3691 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3692 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3693 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3694 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3695 pstats->mac_stx[1].t##_lo, diff.lo); \
3698 #define UPDATE_STAT64_NIG(s, t) \
3700 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3701 diff.lo, new->s##_lo, old->s##_lo); \
3702 ADD_64(estats->t##_hi, diff.hi, \
3703 estats->t##_lo, diff.lo); \
3706 /* sum[hi:lo] += add */
3707 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3710 s_hi += (s_lo < a) ? 1 : 0; \
3713 #define UPDATE_EXTEND_STAT(s) \
3715 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3716 pstats->mac_stx[1].s##_lo, \
3720 #define UPDATE_EXTEND_TSTAT(s, t) \
3722 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3723 old_tclient->s = tclient->s; \
3724 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3727 #define UPDATE_EXTEND_USTAT(s, t) \
3729 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3730 old_uclient->s = uclient->s; \
3731 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3734 #define UPDATE_EXTEND_XSTAT(s, t) \
3736 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3737 old_xclient->s = xclient->s; \
3738 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3741 /* minuend -= subtrahend */
3742 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3744 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3747 /* minuend[hi:lo] -= subtrahend */
3748 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3750 SUB_64(m_hi, 0, m_lo, s); \
3753 #define SUB_EXTEND_USTAT(s, t) \
3755 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3756 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3760 * General service functions
3763 static inline long bnx2x_hilo(u32 *hiref)
3765 u32 lo = *(hiref + 1);
3766 #if (BITS_PER_LONG == 64)
3769 return HILO_U64(hi, lo);
3776 * Init service functions
3779 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3781 if (!bp->stats_pending) {
3782 struct eth_query_ramrod_data ramrod_data = {0};
3785 ramrod_data.drv_counter = bp->stats_counter++;
3786 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3787 for_each_queue(bp, i)
3788 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3790 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3791 ((u32 *)&ramrod_data)[1],
3792 ((u32 *)&ramrod_data)[0], 0);
3794 /* stats ramrod has it's own slot on the spq */
3796 bp->stats_pending = 1;
3801 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3803 struct dmae_command *dmae = &bp->stats_dmae;
3804 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3806 *stats_comp = DMAE_COMP_VAL;
3807 if (CHIP_REV_IS_SLOW(bp))
3811 if (bp->executer_idx) {
3812 int loader_idx = PMF_DMAE_C(bp);
3814 memset(dmae, 0, sizeof(struct dmae_command));
3816 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3817 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3818 DMAE_CMD_DST_RESET |
3820 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3822 DMAE_CMD_ENDIANITY_DW_SWAP |
3824 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3826 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3827 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3828 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3829 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3830 sizeof(struct dmae_command) *
3831 (loader_idx + 1)) >> 2;
3832 dmae->dst_addr_hi = 0;
3833 dmae->len = sizeof(struct dmae_command) >> 2;
3836 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3837 dmae->comp_addr_hi = 0;
3841 bnx2x_post_dmae(bp, dmae, loader_idx);
3843 } else if (bp->func_stx) {
3845 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3849 static int bnx2x_stats_comp(struct bnx2x *bp)
3851 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3855 while (*stats_comp != DMAE_COMP_VAL) {
3857 BNX2X_ERR("timeout waiting for stats finished\n");
3867 * Statistics service functions
3870 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3872 struct dmae_command *dmae;
3874 int loader_idx = PMF_DMAE_C(bp);
3875 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3878 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3879 BNX2X_ERR("BUG!\n");
3883 bp->executer_idx = 0;
3885 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3887 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3889 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3891 DMAE_CMD_ENDIANITY_DW_SWAP |
3893 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3894 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3896 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3897 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3898 dmae->src_addr_lo = bp->port.port_stx >> 2;
3899 dmae->src_addr_hi = 0;
3900 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3901 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3902 dmae->len = DMAE_LEN32_RD_MAX;
3903 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3904 dmae->comp_addr_hi = 0;
3907 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3909 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3910 dmae->src_addr_hi = 0;
3911 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3912 DMAE_LEN32_RD_MAX * 4);
3913 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3914 DMAE_LEN32_RD_MAX * 4);
3915 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3916 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3917 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3918 dmae->comp_val = DMAE_COMP_VAL;
3921 bnx2x_hw_stats_post(bp);
3922 bnx2x_stats_comp(bp);
3925 static void bnx2x_port_stats_init(struct bnx2x *bp)
3927 struct dmae_command *dmae;
3928 int port = BP_PORT(bp);
3929 int vn = BP_E1HVN(bp);
3931 int loader_idx = PMF_DMAE_C(bp);
3933 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3936 if (!bp->link_vars.link_up || !bp->port.pmf) {
3937 BNX2X_ERR("BUG!\n");
3941 bp->executer_idx = 0;
3944 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3945 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3946 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3948 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3950 DMAE_CMD_ENDIANITY_DW_SWAP |
3952 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3953 (vn << DMAE_CMD_E1HVN_SHIFT));
3955 if (bp->port.port_stx) {
3957 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3958 dmae->opcode = opcode;
3959 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3960 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3961 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3962 dmae->dst_addr_hi = 0;
3963 dmae->len = sizeof(struct host_port_stats) >> 2;
3964 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3965 dmae->comp_addr_hi = 0;
3971 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3972 dmae->opcode = opcode;
3973 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3974 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3975 dmae->dst_addr_lo = bp->func_stx >> 2;
3976 dmae->dst_addr_hi = 0;
3977 dmae->len = sizeof(struct host_func_stats) >> 2;
3978 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3979 dmae->comp_addr_hi = 0;
3984 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3985 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3986 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3988 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3990 DMAE_CMD_ENDIANITY_DW_SWAP |
3992 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3993 (vn << DMAE_CMD_E1HVN_SHIFT));
3995 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3997 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3998 NIG_REG_INGRESS_BMAC0_MEM);
4000 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4001 BIGMAC_REGISTER_TX_STAT_GTBYT */
4002 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4003 dmae->opcode = opcode;
4004 dmae->src_addr_lo = (mac_addr +
4005 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4006 dmae->src_addr_hi = 0;
4007 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4008 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4009 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4010 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4011 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4012 dmae->comp_addr_hi = 0;
4015 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4016 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4017 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4018 dmae->opcode = opcode;
4019 dmae->src_addr_lo = (mac_addr +
4020 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4021 dmae->src_addr_hi = 0;
4022 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4023 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4024 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4025 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4026 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4027 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4029 dmae->comp_addr_hi = 0;
4032 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4034 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4036 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4037 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4038 dmae->opcode = opcode;
4039 dmae->src_addr_lo = (mac_addr +
4040 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4041 dmae->src_addr_hi = 0;
4042 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4043 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4044 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4045 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4046 dmae->comp_addr_hi = 0;
4049 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4050 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4051 dmae->opcode = opcode;
4052 dmae->src_addr_lo = (mac_addr +
4053 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4054 dmae->src_addr_hi = 0;
4055 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4056 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4057 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4058 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4060 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4061 dmae->comp_addr_hi = 0;
4064 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4065 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066 dmae->opcode = opcode;
4067 dmae->src_addr_lo = (mac_addr +
4068 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4069 dmae->src_addr_hi = 0;
4070 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4071 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4072 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4073 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4074 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076 dmae->comp_addr_hi = 0;
4081 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4082 dmae->opcode = opcode;
4083 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4084 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4085 dmae->src_addr_hi = 0;
4086 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4087 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4088 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4089 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4090 dmae->comp_addr_hi = 0;
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = opcode;
4095 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4096 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4097 dmae->src_addr_hi = 0;
4098 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4099 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4100 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4101 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102 dmae->len = (2*sizeof(u32)) >> 2;
4103 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4104 dmae->comp_addr_hi = 0;
4107 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4108 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4109 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4110 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4112 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4114 DMAE_CMD_ENDIANITY_DW_SWAP |
4116 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4117 (vn << DMAE_CMD_E1HVN_SHIFT));
4118 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4119 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4120 dmae->src_addr_hi = 0;
4121 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4122 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4123 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4124 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125 dmae->len = (2*sizeof(u32)) >> 2;
4126 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4127 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4128 dmae->comp_val = DMAE_COMP_VAL;
4133 static void bnx2x_func_stats_init(struct bnx2x *bp)
4135 struct dmae_command *dmae = &bp->stats_dmae;
4136 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4139 if (!bp->func_stx) {
4140 BNX2X_ERR("BUG!\n");
4144 bp->executer_idx = 0;
4145 memset(dmae, 0, sizeof(struct dmae_command));
4147 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4148 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4149 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4151 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4153 DMAE_CMD_ENDIANITY_DW_SWAP |
4155 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4156 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4157 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4158 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4159 dmae->dst_addr_lo = bp->func_stx >> 2;
4160 dmae->dst_addr_hi = 0;
4161 dmae->len = sizeof(struct host_func_stats) >> 2;
4162 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4163 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4164 dmae->comp_val = DMAE_COMP_VAL;
4169 static void bnx2x_stats_start(struct bnx2x *bp)
4172 bnx2x_port_stats_init(bp);
4174 else if (bp->func_stx)
4175 bnx2x_func_stats_init(bp);
4177 bnx2x_hw_stats_post(bp);
4178 bnx2x_storm_stats_post(bp);
4181 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4183 bnx2x_stats_comp(bp);
4184 bnx2x_stats_pmf_update(bp);
4185 bnx2x_stats_start(bp);
4188 static void bnx2x_stats_restart(struct bnx2x *bp)
4190 bnx2x_stats_comp(bp);
4191 bnx2x_stats_start(bp);
4194 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4196 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4197 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4198 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4204 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4205 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4206 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4207 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4208 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4209 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4210 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4211 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4212 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4213 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4214 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4215 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4216 UPDATE_STAT64(tx_stat_gt127,
4217 tx_stat_etherstatspkts65octetsto127octets);
4218 UPDATE_STAT64(tx_stat_gt255,
4219 tx_stat_etherstatspkts128octetsto255octets);
4220 UPDATE_STAT64(tx_stat_gt511,
4221 tx_stat_etherstatspkts256octetsto511octets);
4222 UPDATE_STAT64(tx_stat_gt1023,
4223 tx_stat_etherstatspkts512octetsto1023octets);
4224 UPDATE_STAT64(tx_stat_gt1518,
4225 tx_stat_etherstatspkts1024octetsto1522octets);
4226 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4227 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4228 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4229 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4230 UPDATE_STAT64(tx_stat_gterr,
4231 tx_stat_dot3statsinternalmactransmiterrors);
4232 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4234 estats->pause_frames_received_hi =
4235 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4236 estats->pause_frames_received_lo =
4237 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4239 estats->pause_frames_sent_hi =
4240 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4241 estats->pause_frames_sent_lo =
4242 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4245 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4247 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4248 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4249 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4251 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4252 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4253 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4254 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4255 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4256 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4257 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4258 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4259 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4260 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4261 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4262 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4263 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4264 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4265 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4266 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4267 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4268 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4269 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4270 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4271 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4272 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4273 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4274 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4275 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4276 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4277 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4278 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4280 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4281 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4283 estats->pause_frames_received_hi =
4284 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4285 estats->pause_frames_received_lo =
4286 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4287 ADD_64(estats->pause_frames_received_hi,
4288 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4289 estats->pause_frames_received_lo,
4290 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4292 estats->pause_frames_sent_hi =
4293 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4294 estats->pause_frames_sent_lo =
4295 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4296 ADD_64(estats->pause_frames_sent_hi,
4297 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4298 estats->pause_frames_sent_lo,
4299 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4302 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4304 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4305 struct nig_stats *old = &(bp->port.old_nig_stats);
4306 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4307 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4313 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4314 bnx2x_bmac_stats_update(bp);
4316 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4317 bnx2x_emac_stats_update(bp);
4319 else { /* unreached */
4320 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4324 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4325 new->brb_discard - old->brb_discard);
4326 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4327 new->brb_truncate - old->brb_truncate);
4329 UPDATE_STAT64_NIG(egress_mac_pkt0,
4330 etherstatspkts1024octetsto1522octets);
4331 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4333 memcpy(old, new, sizeof(struct nig_stats));
4335 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4336 sizeof(struct mac_stx));
4337 estats->brb_drop_hi = pstats->brb_drop_hi;
4338 estats->brb_drop_lo = pstats->brb_drop_lo;
4340 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4342 if (!BP_NOMCP(bp)) {
4344 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4345 if (nig_timer_max != estats->nig_timer_max) {
4346 estats->nig_timer_max = nig_timer_max;
4347 BNX2X_ERR("NIG timer max (%u)\n",
4348 estats->nig_timer_max);
4355 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4357 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4358 struct tstorm_per_port_stats *tport =
4359 &stats->tstorm_common.port_statistics;
4360 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4361 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4364 memcpy(&(fstats->total_bytes_received_hi),
4365 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4366 sizeof(struct host_func_stats) - 2*sizeof(u32));
4367 estats->error_bytes_received_hi = 0;
4368 estats->error_bytes_received_lo = 0;
4369 estats->etherstatsoverrsizepkts_hi = 0;
4370 estats->etherstatsoverrsizepkts_lo = 0;
4371 estats->no_buff_discard_hi = 0;
4372 estats->no_buff_discard_lo = 0;
4374 for_each_queue(bp, i) {
4375 struct bnx2x_fastpath *fp = &bp->fp[i];
4376 int cl_id = fp->cl_id;
4377 struct tstorm_per_client_stats *tclient =
4378 &stats->tstorm_common.client_statistics[cl_id];
4379 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4380 struct ustorm_per_client_stats *uclient =
4381 &stats->ustorm_common.client_statistics[cl_id];
4382 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4383 struct xstorm_per_client_stats *xclient =
4384 &stats->xstorm_common.client_statistics[cl_id];
4385 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4386 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4389 /* are storm stats valid? */
4390 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4391 bp->stats_counter) {
4392 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4393 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4394 i, xclient->stats_counter, bp->stats_counter);
4397 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4398 bp->stats_counter) {
4399 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4400 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4401 i, tclient->stats_counter, bp->stats_counter);
4404 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4405 bp->stats_counter) {
4406 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4407 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4408 i, uclient->stats_counter, bp->stats_counter);
4412 qstats->total_bytes_received_hi =
4413 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4414 qstats->total_bytes_received_lo =
4415 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4417 ADD_64(qstats->total_bytes_received_hi,
4418 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4419 qstats->total_bytes_received_lo,
4420 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4422 ADD_64(qstats->total_bytes_received_hi,
4423 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4424 qstats->total_bytes_received_lo,
4425 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4427 SUB_64(qstats->total_bytes_received_hi,
4428 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4429 qstats->total_bytes_received_lo,
4430 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4432 SUB_64(qstats->total_bytes_received_hi,
4433 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4434 qstats->total_bytes_received_lo,
4435 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4437 SUB_64(qstats->total_bytes_received_hi,
4438 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4439 qstats->total_bytes_received_lo,
4440 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4442 qstats->valid_bytes_received_hi =
4443 qstats->total_bytes_received_hi;
4444 qstats->valid_bytes_received_lo =
4445 qstats->total_bytes_received_lo;
4447 qstats->error_bytes_received_hi =
4448 le32_to_cpu(tclient->rcv_error_bytes.hi);
4449 qstats->error_bytes_received_lo =
4450 le32_to_cpu(tclient->rcv_error_bytes.lo);
4452 ADD_64(qstats->total_bytes_received_hi,
4453 qstats->error_bytes_received_hi,
4454 qstats->total_bytes_received_lo,
4455 qstats->error_bytes_received_lo);
4457 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4458 total_unicast_packets_received);
4459 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4460 total_multicast_packets_received);
4461 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4462 total_broadcast_packets_received);
4463 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4464 etherstatsoverrsizepkts);
4465 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4467 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4468 total_unicast_packets_received);
4469 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4470 total_multicast_packets_received);
4471 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4472 total_broadcast_packets_received);
4473 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4474 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4475 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4477 qstats->total_bytes_transmitted_hi =
4478 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4479 qstats->total_bytes_transmitted_lo =
4480 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4482 ADD_64(qstats->total_bytes_transmitted_hi,
4483 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4484 qstats->total_bytes_transmitted_lo,
4485 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4487 ADD_64(qstats->total_bytes_transmitted_hi,
4488 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4489 qstats->total_bytes_transmitted_lo,
4490 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4492 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4493 total_unicast_packets_transmitted);
4494 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4495 total_multicast_packets_transmitted);
4496 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4497 total_broadcast_packets_transmitted);
4499 old_tclient->checksum_discard = tclient->checksum_discard;
4500 old_tclient->ttl0_discard = tclient->ttl0_discard;
4502 ADD_64(fstats->total_bytes_received_hi,
4503 qstats->total_bytes_received_hi,
4504 fstats->total_bytes_received_lo,
4505 qstats->total_bytes_received_lo);
4506 ADD_64(fstats->total_bytes_transmitted_hi,
4507 qstats->total_bytes_transmitted_hi,
4508 fstats->total_bytes_transmitted_lo,
4509 qstats->total_bytes_transmitted_lo);
4510 ADD_64(fstats->total_unicast_packets_received_hi,
4511 qstats->total_unicast_packets_received_hi,
4512 fstats->total_unicast_packets_received_lo,
4513 qstats->total_unicast_packets_received_lo);
4514 ADD_64(fstats->total_multicast_packets_received_hi,
4515 qstats->total_multicast_packets_received_hi,
4516 fstats->total_multicast_packets_received_lo,
4517 qstats->total_multicast_packets_received_lo);
4518 ADD_64(fstats->total_broadcast_packets_received_hi,
4519 qstats->total_broadcast_packets_received_hi,
4520 fstats->total_broadcast_packets_received_lo,
4521 qstats->total_broadcast_packets_received_lo);
4522 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4523 qstats->total_unicast_packets_transmitted_hi,
4524 fstats->total_unicast_packets_transmitted_lo,
4525 qstats->total_unicast_packets_transmitted_lo);
4526 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4527 qstats->total_multicast_packets_transmitted_hi,
4528 fstats->total_multicast_packets_transmitted_lo,
4529 qstats->total_multicast_packets_transmitted_lo);
4530 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4531 qstats->total_broadcast_packets_transmitted_hi,
4532 fstats->total_broadcast_packets_transmitted_lo,
4533 qstats->total_broadcast_packets_transmitted_lo);
4534 ADD_64(fstats->valid_bytes_received_hi,
4535 qstats->valid_bytes_received_hi,
4536 fstats->valid_bytes_received_lo,
4537 qstats->valid_bytes_received_lo);
4539 ADD_64(estats->error_bytes_received_hi,
4540 qstats->error_bytes_received_hi,
4541 estats->error_bytes_received_lo,
4542 qstats->error_bytes_received_lo);
4543 ADD_64(estats->etherstatsoverrsizepkts_hi,
4544 qstats->etherstatsoverrsizepkts_hi,
4545 estats->etherstatsoverrsizepkts_lo,
4546 qstats->etherstatsoverrsizepkts_lo);
4547 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4548 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4551 ADD_64(fstats->total_bytes_received_hi,
4552 estats->rx_stat_ifhcinbadoctets_hi,
4553 fstats->total_bytes_received_lo,
4554 estats->rx_stat_ifhcinbadoctets_lo);
4556 memcpy(estats, &(fstats->total_bytes_received_hi),
4557 sizeof(struct host_func_stats) - 2*sizeof(u32));
4559 ADD_64(estats->etherstatsoverrsizepkts_hi,
4560 estats->rx_stat_dot3statsframestoolong_hi,
4561 estats->etherstatsoverrsizepkts_lo,
4562 estats->rx_stat_dot3statsframestoolong_lo);
4563 ADD_64(estats->error_bytes_received_hi,
4564 estats->rx_stat_ifhcinbadoctets_hi,
4565 estats->error_bytes_received_lo,
4566 estats->rx_stat_ifhcinbadoctets_lo);
4569 estats->mac_filter_discard =
4570 le32_to_cpu(tport->mac_filter_discard);
4571 estats->xxoverflow_discard =
4572 le32_to_cpu(tport->xxoverflow_discard);
4573 estats->brb_truncate_discard =
4574 le32_to_cpu(tport->brb_truncate_discard);
4575 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4578 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4580 bp->stats_pending = 0;
4585 static void bnx2x_net_stats_update(struct bnx2x *bp)
4587 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4588 struct net_device_stats *nstats = &bp->dev->stats;
4591 nstats->rx_packets =
4592 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4593 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4594 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4596 nstats->tx_packets =
4597 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4598 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4599 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4601 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4603 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4605 nstats->rx_dropped = estats->mac_discard;
4606 for_each_queue(bp, i)
4607 nstats->rx_dropped +=
4608 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4610 nstats->tx_dropped = 0;
4613 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4615 nstats->collisions =
4616 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4618 nstats->rx_length_errors =
4619 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4620 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4621 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4622 bnx2x_hilo(&estats->brb_truncate_hi);
4623 nstats->rx_crc_errors =
4624 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4625 nstats->rx_frame_errors =
4626 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4627 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4628 nstats->rx_missed_errors = estats->xxoverflow_discard;
4630 nstats->rx_errors = nstats->rx_length_errors +
4631 nstats->rx_over_errors +
4632 nstats->rx_crc_errors +
4633 nstats->rx_frame_errors +
4634 nstats->rx_fifo_errors +
4635 nstats->rx_missed_errors;
4637 nstats->tx_aborted_errors =
4638 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4639 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4640 nstats->tx_carrier_errors =
4641 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4642 nstats->tx_fifo_errors = 0;
4643 nstats->tx_heartbeat_errors = 0;
4644 nstats->tx_window_errors = 0;
4646 nstats->tx_errors = nstats->tx_aborted_errors +
4647 nstats->tx_carrier_errors +
4648 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4651 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4653 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4656 estats->driver_xoff = 0;
4657 estats->rx_err_discard_pkt = 0;
4658 estats->rx_skb_alloc_failed = 0;
4659 estats->hw_csum_err = 0;
4660 for_each_queue(bp, i) {
4661 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4663 estats->driver_xoff += qstats->driver_xoff;
4664 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4665 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4666 estats->hw_csum_err += qstats->hw_csum_err;
4670 static void bnx2x_stats_update(struct bnx2x *bp)
4672 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4674 if (*stats_comp != DMAE_COMP_VAL)
4678 bnx2x_hw_stats_update(bp);
4680 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4681 BNX2X_ERR("storm stats were not updated for 3 times\n");
4686 bnx2x_net_stats_update(bp);
4687 bnx2x_drv_stats_update(bp);
4689 if (netif_msg_timer(bp)) {
4690 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4693 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4695 estats->brb_drop_lo, estats->brb_truncate_lo);
4697 for_each_queue(bp, i) {
4698 struct bnx2x_fastpath *fp = &bp->fp[i];
4699 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4701 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4702 " rx pkt(%lu) rx calls(%lu %lu)\n",
4703 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4705 le16_to_cpu(*fp->rx_cons_sb),
4706 bnx2x_hilo(&qstats->
4707 total_unicast_packets_received_hi),
4708 fp->rx_calls, fp->rx_pkt);
4711 for_each_queue(bp, i) {
4712 struct bnx2x_fastpath *fp = &bp->fp[i];
4713 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4714 struct netdev_queue *txq =
4715 netdev_get_tx_queue(bp->dev, i);
4717 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4718 " tx pkt(%lu) tx calls (%lu)"
4719 " %s (Xoff events %u)\n",
4720 fp->name, bnx2x_tx_avail(fp),
4721 le16_to_cpu(*fp->tx_cons_sb),
4722 bnx2x_hilo(&qstats->
4723 total_unicast_packets_transmitted_hi),
4725 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4726 qstats->driver_xoff);
4730 bnx2x_hw_stats_post(bp);
4731 bnx2x_storm_stats_post(bp);
4734 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4736 struct dmae_command *dmae;
4738 int loader_idx = PMF_DMAE_C(bp);
4739 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4741 bp->executer_idx = 0;
4743 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4745 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4747 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4749 DMAE_CMD_ENDIANITY_DW_SWAP |
4751 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4752 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4754 if (bp->port.port_stx) {
4756 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4758 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4760 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4761 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4762 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4763 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4764 dmae->dst_addr_hi = 0;
4765 dmae->len = sizeof(struct host_port_stats) >> 2;
4767 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4768 dmae->comp_addr_hi = 0;
4771 dmae->comp_addr_lo =
4772 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4773 dmae->comp_addr_hi =
4774 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4775 dmae->comp_val = DMAE_COMP_VAL;
4783 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4784 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4787 dmae->dst_addr_lo = bp->func_stx >> 2;
4788 dmae->dst_addr_hi = 0;
4789 dmae->len = sizeof(struct host_func_stats) >> 2;
4790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4792 dmae->comp_val = DMAE_COMP_VAL;
4798 static void bnx2x_stats_stop(struct bnx2x *bp)
4802 bnx2x_stats_comp(bp);
4805 update = (bnx2x_hw_stats_update(bp) == 0);
4807 update |= (bnx2x_storm_stats_update(bp) == 0);
4810 bnx2x_net_stats_update(bp);
4813 bnx2x_port_stats_stop(bp);
4815 bnx2x_hw_stats_post(bp);
4816 bnx2x_stats_comp(bp);
4820 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4824 static const struct {
4825 void (*action)(struct bnx2x *bp);
4826 enum bnx2x_stats_state next_state;
4827 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4830 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4831 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4832 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4833 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4836 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4837 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4838 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4839 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4843 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4845 enum bnx2x_stats_state state = bp->stats_state;
4847 if (unlikely(bp->panic))
4850 bnx2x_stats_stm[state][event].action(bp);
4851 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4853 /* Make sure the state has been "changed" */
4856 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4857 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4858 state, event, bp->stats_state);
4861 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4863 struct dmae_command *dmae;
4864 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4867 if (!bp->port.pmf || !bp->port.port_stx) {
4868 BNX2X_ERR("BUG!\n");
4872 bp->executer_idx = 0;
4874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4875 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4876 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4881 DMAE_CMD_ENDIANITY_DW_SWAP |
4883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4885 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4886 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4887 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4888 dmae->dst_addr_hi = 0;
4889 dmae->len = sizeof(struct host_port_stats) >> 2;
4890 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4891 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4892 dmae->comp_val = DMAE_COMP_VAL;
4895 bnx2x_hw_stats_post(bp);
4896 bnx2x_stats_comp(bp);
4899 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4901 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4902 int port = BP_PORT(bp);
4907 if (!bp->port.pmf || !bp->func_stx) {
4908 BNX2X_ERR("BUG!\n");
4912 /* save our func_stx */
4913 func_stx = bp->func_stx;
4915 for (vn = VN_0; vn < vn_max; vn++) {
4918 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4919 bnx2x_func_stats_init(bp);
4920 bnx2x_hw_stats_post(bp);
4921 bnx2x_stats_comp(bp);
4924 /* restore our func_stx */
4925 bp->func_stx = func_stx;
4928 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4930 struct dmae_command *dmae = &bp->stats_dmae;
4931 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4934 if (!bp->func_stx) {
4935 BNX2X_ERR("BUG!\n");
4939 bp->executer_idx = 0;
4940 memset(dmae, 0, sizeof(struct dmae_command));
4942 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4943 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4944 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4946 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4948 DMAE_CMD_ENDIANITY_DW_SWAP |
4950 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4951 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4952 dmae->src_addr_lo = bp->func_stx >> 2;
4953 dmae->src_addr_hi = 0;
4954 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4955 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4956 dmae->len = sizeof(struct host_func_stats) >> 2;
4957 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4958 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4959 dmae->comp_val = DMAE_COMP_VAL;
4962 bnx2x_hw_stats_post(bp);
4963 bnx2x_stats_comp(bp);
4966 static void bnx2x_stats_init(struct bnx2x *bp)
4968 int port = BP_PORT(bp);
4969 int func = BP_FUNC(bp);
4972 bp->stats_pending = 0;
4973 bp->executer_idx = 0;
4974 bp->stats_counter = 0;
4976 /* port and func stats for management */
4977 if (!BP_NOMCP(bp)) {
4978 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4979 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4982 bp->port.port_stx = 0;
4985 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4986 bp->port.port_stx, bp->func_stx);
4989 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4990 bp->port.old_nig_stats.brb_discard =
4991 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4992 bp->port.old_nig_stats.brb_truncate =
4993 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4994 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4995 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4996 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4997 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4999 /* function stats */
5000 for_each_queue(bp, i) {
5001 struct bnx2x_fastpath *fp = &bp->fp[i];
5003 memset(&fp->old_tclient, 0,
5004 sizeof(struct tstorm_per_client_stats));
5005 memset(&fp->old_uclient, 0,
5006 sizeof(struct ustorm_per_client_stats));
5007 memset(&fp->old_xclient, 0,
5008 sizeof(struct xstorm_per_client_stats));
5009 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5012 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5013 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5015 bp->stats_state = STATS_STATE_DISABLED;
5018 if (bp->port.port_stx)
5019 bnx2x_port_stats_base_init(bp);
5022 bnx2x_func_stats_base_init(bp);
5024 } else if (bp->func_stx)
5025 bnx2x_func_stats_base_update(bp);
5028 static void bnx2x_timer(unsigned long data)
5030 struct bnx2x *bp = (struct bnx2x *) data;
5032 if (!netif_running(bp->dev))
5035 if (atomic_read(&bp->intr_sem) != 0)
5039 struct bnx2x_fastpath *fp = &bp->fp[0];
5043 rc = bnx2x_rx_int(fp, 1000);
5046 if (!BP_NOMCP(bp)) {
5047 int func = BP_FUNC(bp);
5051 ++bp->fw_drv_pulse_wr_seq;
5052 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5053 /* TBD - add SYSTEM_TIME */
5054 drv_pulse = bp->fw_drv_pulse_wr_seq;
5055 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5057 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5058 MCP_PULSE_SEQ_MASK);
5059 /* The delta between driver pulse and mcp response
5060 * should be 1 (before mcp response) or 0 (after mcp response)
5062 if ((drv_pulse != mcp_pulse) &&
5063 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5064 /* someone lost a heartbeat... */
5065 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5066 drv_pulse, mcp_pulse);
5070 if (bp->state == BNX2X_STATE_OPEN)
5071 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5074 mod_timer(&bp->timer, jiffies + bp->current_interval);
5077 /* end of Statistics */
5082 * nic init service functions
5085 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5087 int port = BP_PORT(bp);
5090 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5091 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5092 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5093 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5094 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5095 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5098 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5099 dma_addr_t mapping, int sb_id)
5101 int port = BP_PORT(bp);
5102 int func = BP_FUNC(bp);
5107 section = ((u64)mapping) + offsetof(struct host_status_block,
5109 sb->u_status_block.status_block_id = sb_id;
5111 REG_WR(bp, BAR_CSTRORM_INTMEM +
5112 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5113 REG_WR(bp, BAR_CSTRORM_INTMEM +
5114 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5116 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5117 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5119 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5120 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5121 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5124 section = ((u64)mapping) + offsetof(struct host_status_block,
5126 sb->c_status_block.status_block_id = sb_id;
5128 REG_WR(bp, BAR_CSTRORM_INTMEM +
5129 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5130 REG_WR(bp, BAR_CSTRORM_INTMEM +
5131 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5133 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5134 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5136 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5137 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5138 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5140 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5143 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5145 int func = BP_FUNC(bp);
5147 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5148 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5149 sizeof(struct tstorm_def_status_block)/4);
5150 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5151 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5152 sizeof(struct cstorm_def_status_block_u)/4);
5153 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5154 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5155 sizeof(struct cstorm_def_status_block_c)/4);
5156 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5157 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5158 sizeof(struct xstorm_def_status_block)/4);
5161 static void bnx2x_init_def_sb(struct bnx2x *bp,
5162 struct host_def_status_block *def_sb,
5163 dma_addr_t mapping, int sb_id)
5165 int port = BP_PORT(bp);
5166 int func = BP_FUNC(bp);
5167 int index, val, reg_offset;
5171 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5172 atten_status_block);
5173 def_sb->atten_status_block.status_block_id = sb_id;
5177 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5178 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5180 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5181 bp->attn_group[index].sig[0] = REG_RD(bp,
5182 reg_offset + 0x10*index);
5183 bp->attn_group[index].sig[1] = REG_RD(bp,
5184 reg_offset + 0x4 + 0x10*index);
5185 bp->attn_group[index].sig[2] = REG_RD(bp,
5186 reg_offset + 0x8 + 0x10*index);
5187 bp->attn_group[index].sig[3] = REG_RD(bp,
5188 reg_offset + 0xc + 0x10*index);
5191 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5192 HC_REG_ATTN_MSG0_ADDR_L);
5194 REG_WR(bp, reg_offset, U64_LO(section));
5195 REG_WR(bp, reg_offset + 4, U64_HI(section));
5197 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5199 val = REG_RD(bp, reg_offset);
5201 REG_WR(bp, reg_offset, val);
5204 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5205 u_def_status_block);
5206 def_sb->u_def_status_block.status_block_id = sb_id;
5208 REG_WR(bp, BAR_CSTRORM_INTMEM +
5209 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5210 REG_WR(bp, BAR_CSTRORM_INTMEM +
5211 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5213 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5214 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5216 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5217 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5218 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5221 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5222 c_def_status_block);
5223 def_sb->c_def_status_block.status_block_id = sb_id;
5225 REG_WR(bp, BAR_CSTRORM_INTMEM +
5226 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5227 REG_WR(bp, BAR_CSTRORM_INTMEM +
5228 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5230 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5231 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5233 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5235 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5238 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5239 t_def_status_block);
5240 def_sb->t_def_status_block.status_block_id = sb_id;
5242 REG_WR(bp, BAR_TSTRORM_INTMEM +
5243 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5244 REG_WR(bp, BAR_TSTRORM_INTMEM +
5245 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5247 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5248 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5250 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5251 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5252 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5255 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5256 x_def_status_block);
5257 def_sb->x_def_status_block.status_block_id = sb_id;
5259 REG_WR(bp, BAR_XSTRORM_INTMEM +
5260 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5261 REG_WR(bp, BAR_XSTRORM_INTMEM +
5262 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5264 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5265 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5267 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5268 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5269 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5271 bp->stats_pending = 0;
5272 bp->set_mac_pending = 0;
5274 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5277 static void bnx2x_update_coalesce(struct bnx2x *bp)
5279 int port = BP_PORT(bp);
5282 for_each_queue(bp, i) {
5283 int sb_id = bp->fp[i].sb_id;
5285 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5286 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5287 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5288 U_SB_ETH_RX_CQ_INDEX),
5289 bp->rx_ticks/(4 * BNX2X_BTR));
5290 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5291 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5292 U_SB_ETH_RX_CQ_INDEX),
5293 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5295 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5296 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5297 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5298 C_SB_ETH_TX_CQ_INDEX),
5299 bp->tx_ticks/(4 * BNX2X_BTR));
5300 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5301 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5302 C_SB_ETH_TX_CQ_INDEX),
5303 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5307 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5308 struct bnx2x_fastpath *fp, int last)
5312 for (i = 0; i < last; i++) {
5313 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5314 struct sk_buff *skb = rx_buf->skb;
5317 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5321 if (fp->tpa_state[i] == BNX2X_TPA_START)
5322 dma_unmap_single(&bp->pdev->dev,
5323 dma_unmap_addr(rx_buf, mapping),
5324 bp->rx_buf_size, DMA_FROM_DEVICE);
5331 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5333 int func = BP_FUNC(bp);
5334 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5335 ETH_MAX_AGGREGATION_QUEUES_E1H;
5336 u16 ring_prod, cqe_ring_prod;
5339 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5341 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5343 if (bp->flags & TPA_ENABLE_FLAG) {
5345 for_each_queue(bp, j) {
5346 struct bnx2x_fastpath *fp = &bp->fp[j];
5348 for (i = 0; i < max_agg_queues; i++) {
5349 fp->tpa_pool[i].skb =
5350 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5351 if (!fp->tpa_pool[i].skb) {
5352 BNX2X_ERR("Failed to allocate TPA "
5353 "skb pool for queue[%d] - "
5354 "disabling TPA on this "
5356 bnx2x_free_tpa_pool(bp, fp, i);
5357 fp->disable_tpa = 1;
5360 dma_unmap_addr_set((struct sw_rx_bd *)
5361 &bp->fp->tpa_pool[i],
5363 fp->tpa_state[i] = BNX2X_TPA_STOP;
5368 for_each_queue(bp, j) {
5369 struct bnx2x_fastpath *fp = &bp->fp[j];
5372 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5373 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5375 /* "next page" elements initialization */
5377 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5378 struct eth_rx_sge *sge;
5380 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5382 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5383 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5385 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5386 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5389 bnx2x_init_sge_ring_bit_mask(fp);
5392 for (i = 1; i <= NUM_RX_RINGS; i++) {
5393 struct eth_rx_bd *rx_bd;
5395 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5397 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5398 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5400 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5401 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5405 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5406 struct eth_rx_cqe_next_page *nextpg;
5408 nextpg = (struct eth_rx_cqe_next_page *)
5409 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5411 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5412 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5414 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5415 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5418 /* Allocate SGEs and initialize the ring elements */
5419 for (i = 0, ring_prod = 0;
5420 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5422 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5423 BNX2X_ERR("was only able to allocate "
5425 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5426 /* Cleanup already allocated elements */
5427 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5428 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5429 fp->disable_tpa = 1;
5433 ring_prod = NEXT_SGE_IDX(ring_prod);
5435 fp->rx_sge_prod = ring_prod;
5437 /* Allocate BDs and initialize BD ring */
5438 fp->rx_comp_cons = 0;
5439 cqe_ring_prod = ring_prod = 0;
5440 for (i = 0; i < bp->rx_ring_size; i++) {
5441 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5442 BNX2X_ERR("was only able to allocate "
5443 "%d rx skbs on queue[%d]\n", i, j);
5444 fp->eth_q_stats.rx_skb_alloc_failed++;
5447 ring_prod = NEXT_RX_IDX(ring_prod);
5448 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5449 WARN_ON(ring_prod <= i);
5452 fp->rx_bd_prod = ring_prod;
5453 /* must not have more available CQEs than BDs */
5454 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5456 fp->rx_pkt = fp->rx_calls = 0;
5459 * this will generate an interrupt (to the TSTORM)
5460 * must only be done after chip is initialized
5462 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5467 REG_WR(bp, BAR_USTRORM_INTMEM +
5468 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5469 U64_LO(fp->rx_comp_mapping));
5470 REG_WR(bp, BAR_USTRORM_INTMEM +
5471 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5472 U64_HI(fp->rx_comp_mapping));
5476 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5480 for_each_queue(bp, j) {
5481 struct bnx2x_fastpath *fp = &bp->fp[j];
5483 for (i = 1; i <= NUM_TX_RINGS; i++) {
5484 struct eth_tx_next_bd *tx_next_bd =
5485 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5487 tx_next_bd->addr_hi =
5488 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5489 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5490 tx_next_bd->addr_lo =
5491 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5492 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5495 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5496 fp->tx_db.data.zero_fill1 = 0;
5497 fp->tx_db.data.prod = 0;
5499 fp->tx_pkt_prod = 0;
5500 fp->tx_pkt_cons = 0;
5503 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5508 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5510 int func = BP_FUNC(bp);
5512 spin_lock_init(&bp->spq_lock);
5514 bp->spq_left = MAX_SPQ_PENDING;
5515 bp->spq_prod_idx = 0;
5516 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5517 bp->spq_prod_bd = bp->spq;
5518 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5520 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5521 U64_LO(bp->spq_mapping));
5523 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5524 U64_HI(bp->spq_mapping));
5526 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5530 static void bnx2x_init_context(struct bnx2x *bp)
5535 for_each_queue(bp, i) {
5536 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5537 struct bnx2x_fastpath *fp = &bp->fp[i];
5538 u8 cl_id = fp->cl_id;
5540 context->ustorm_st_context.common.sb_index_numbers =
5541 BNX2X_RX_SB_INDEX_NUM;
5542 context->ustorm_st_context.common.clientId = cl_id;
5543 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5544 context->ustorm_st_context.common.flags =
5545 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5546 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5547 context->ustorm_st_context.common.statistics_counter_id =
5549 context->ustorm_st_context.common.mc_alignment_log_size =
5550 BNX2X_RX_ALIGN_SHIFT;
5551 context->ustorm_st_context.common.bd_buff_size =
5553 context->ustorm_st_context.common.bd_page_base_hi =
5554 U64_HI(fp->rx_desc_mapping);
5555 context->ustorm_st_context.common.bd_page_base_lo =
5556 U64_LO(fp->rx_desc_mapping);
5557 if (!fp->disable_tpa) {
5558 context->ustorm_st_context.common.flags |=
5559 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5560 context->ustorm_st_context.common.sge_buff_size =
5561 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5563 context->ustorm_st_context.common.sge_page_base_hi =
5564 U64_HI(fp->rx_sge_mapping);
5565 context->ustorm_st_context.common.sge_page_base_lo =
5566 U64_LO(fp->rx_sge_mapping);
5568 context->ustorm_st_context.common.max_sges_for_packet =
5569 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5570 context->ustorm_st_context.common.max_sges_for_packet =
5571 ((context->ustorm_st_context.common.
5572 max_sges_for_packet + PAGES_PER_SGE - 1) &
5573 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5576 context->ustorm_ag_context.cdu_usage =
5577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5578 CDU_REGION_NUMBER_UCM_AG,
5579 ETH_CONNECTION_TYPE);
5581 context->xstorm_ag_context.cdu_reserved =
5582 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5583 CDU_REGION_NUMBER_XCM_AG,
5584 ETH_CONNECTION_TYPE);
5588 for_each_queue(bp, i) {
5589 struct bnx2x_fastpath *fp = &bp->fp[i];
5590 struct eth_context *context =
5591 bnx2x_sp(bp, context[i].eth);
5593 context->cstorm_st_context.sb_index_number =
5594 C_SB_ETH_TX_CQ_INDEX;
5595 context->cstorm_st_context.status_block_id = fp->sb_id;
5597 context->xstorm_st_context.tx_bd_page_base_hi =
5598 U64_HI(fp->tx_desc_mapping);
5599 context->xstorm_st_context.tx_bd_page_base_lo =
5600 U64_LO(fp->tx_desc_mapping);
5601 context->xstorm_st_context.statistics_data = (fp->cl_id |
5602 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5606 static void bnx2x_init_ind_table(struct bnx2x *bp)
5608 int func = BP_FUNC(bp);
5611 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5615 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5616 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5617 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5618 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5619 bp->fp->cl_id + (i % bp->num_queues));
5622 static void bnx2x_set_client_config(struct bnx2x *bp)
5624 struct tstorm_eth_client_config tstorm_client = {0};
5625 int port = BP_PORT(bp);
5628 tstorm_client.mtu = bp->dev->mtu;
5629 tstorm_client.config_flags =
5630 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5631 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5633 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5634 tstorm_client.config_flags |=
5635 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5636 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5640 for_each_queue(bp, i) {
5641 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5643 REG_WR(bp, BAR_TSTRORM_INTMEM +
5644 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5645 ((u32 *)&tstorm_client)[0]);
5646 REG_WR(bp, BAR_TSTRORM_INTMEM +
5647 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5648 ((u32 *)&tstorm_client)[1]);
5651 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5652 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5655 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5657 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5658 int mode = bp->rx_mode;
5659 int mask = bp->rx_mode_cl_mask;
5660 int func = BP_FUNC(bp);
5661 int port = BP_PORT(bp);
5663 /* All but management unicast packets should pass to the host as well */
5665 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5666 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5667 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5668 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5670 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5673 case BNX2X_RX_MODE_NONE: /* no Rx */
5674 tstorm_mac_filter.ucast_drop_all = mask;
5675 tstorm_mac_filter.mcast_drop_all = mask;
5676 tstorm_mac_filter.bcast_drop_all = mask;
5679 case BNX2X_RX_MODE_NORMAL:
5680 tstorm_mac_filter.bcast_accept_all = mask;
5683 case BNX2X_RX_MODE_ALLMULTI:
5684 tstorm_mac_filter.mcast_accept_all = mask;
5685 tstorm_mac_filter.bcast_accept_all = mask;
5688 case BNX2X_RX_MODE_PROMISC:
5689 tstorm_mac_filter.ucast_accept_all = mask;
5690 tstorm_mac_filter.mcast_accept_all = mask;
5691 tstorm_mac_filter.bcast_accept_all = mask;
5692 /* pass management unicast packets as well */
5693 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5697 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5702 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5705 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5706 REG_WR(bp, BAR_TSTRORM_INTMEM +
5707 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5708 ((u32 *)&tstorm_mac_filter)[i]);
5710 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5711 ((u32 *)&tstorm_mac_filter)[i]); */
5714 if (mode != BNX2X_RX_MODE_NONE)
5715 bnx2x_set_client_config(bp);
5718 static void bnx2x_init_internal_common(struct bnx2x *bp)
5722 /* Zero this manually as its initialization is
5723 currently missing in the initTool */
5724 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5725 REG_WR(bp, BAR_USTRORM_INTMEM +
5726 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5729 static void bnx2x_init_internal_port(struct bnx2x *bp)
5731 int port = BP_PORT(bp);
5734 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5736 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5737 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5738 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5741 static void bnx2x_init_internal_func(struct bnx2x *bp)
5743 struct tstorm_eth_function_common_config tstorm_config = {0};
5744 struct stats_indication_flags stats_flags = {0};
5745 int port = BP_PORT(bp);
5746 int func = BP_FUNC(bp);
5752 tstorm_config.config_flags = MULTI_FLAGS(bp);
5753 tstorm_config.rss_result_mask = MULTI_MASK;
5756 /* Enable TPA if needed */
5757 if (bp->flags & TPA_ENABLE_FLAG)
5758 tstorm_config.config_flags |=
5759 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5762 tstorm_config.config_flags |=
5763 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5765 tstorm_config.leading_client_id = BP_L_ID(bp);
5767 REG_WR(bp, BAR_TSTRORM_INTMEM +
5768 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5769 (*(u32 *)&tstorm_config));
5771 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5772 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5773 bnx2x_set_storm_rx_mode(bp);
5775 for_each_queue(bp, i) {
5776 u8 cl_id = bp->fp[i].cl_id;
5778 /* reset xstorm per client statistics */
5779 offset = BAR_XSTRORM_INTMEM +
5780 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5782 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5783 REG_WR(bp, offset + j*4, 0);
5785 /* reset tstorm per client statistics */
5786 offset = BAR_TSTRORM_INTMEM +
5787 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5789 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5790 REG_WR(bp, offset + j*4, 0);
5792 /* reset ustorm per client statistics */
5793 offset = BAR_USTRORM_INTMEM +
5794 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5796 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5797 REG_WR(bp, offset + j*4, 0);
5800 /* Init statistics related context */
5801 stats_flags.collect_eth = 1;
5803 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5804 ((u32 *)&stats_flags)[0]);
5805 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5806 ((u32 *)&stats_flags)[1]);
5808 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5809 ((u32 *)&stats_flags)[0]);
5810 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5811 ((u32 *)&stats_flags)[1]);
5813 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5814 ((u32 *)&stats_flags)[0]);
5815 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5816 ((u32 *)&stats_flags)[1]);
5818 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5819 ((u32 *)&stats_flags)[0]);
5820 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5821 ((u32 *)&stats_flags)[1]);
5823 REG_WR(bp, BAR_XSTRORM_INTMEM +
5824 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5825 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5826 REG_WR(bp, BAR_XSTRORM_INTMEM +
5827 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5828 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5830 REG_WR(bp, BAR_TSTRORM_INTMEM +
5831 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833 REG_WR(bp, BAR_TSTRORM_INTMEM +
5834 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5837 REG_WR(bp, BAR_USTRORM_INTMEM +
5838 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840 REG_WR(bp, BAR_USTRORM_INTMEM +
5841 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5844 if (CHIP_IS_E1H(bp)) {
5845 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5847 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5849 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5851 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5854 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5858 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5859 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5860 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5861 for_each_queue(bp, i) {
5862 struct bnx2x_fastpath *fp = &bp->fp[i];
5864 REG_WR(bp, BAR_USTRORM_INTMEM +
5865 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5866 U64_LO(fp->rx_comp_mapping));
5867 REG_WR(bp, BAR_USTRORM_INTMEM +
5868 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5869 U64_HI(fp->rx_comp_mapping));
5872 REG_WR(bp, BAR_USTRORM_INTMEM +
5873 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5874 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5875 REG_WR(bp, BAR_USTRORM_INTMEM +
5876 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5877 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5879 REG_WR16(bp, BAR_USTRORM_INTMEM +
5880 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5884 /* dropless flow control */
5885 if (CHIP_IS_E1H(bp)) {
5886 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5888 rx_pause.bd_thr_low = 250;
5889 rx_pause.cqe_thr_low = 250;
5891 rx_pause.sge_thr_low = 0;
5892 rx_pause.bd_thr_high = 350;
5893 rx_pause.cqe_thr_high = 350;
5894 rx_pause.sge_thr_high = 0;
5896 for_each_queue(bp, i) {
5897 struct bnx2x_fastpath *fp = &bp->fp[i];
5899 if (!fp->disable_tpa) {
5900 rx_pause.sge_thr_low = 150;
5901 rx_pause.sge_thr_high = 250;
5905 offset = BAR_USTRORM_INTMEM +
5906 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5909 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5911 REG_WR(bp, offset + j*4,
5912 ((u32 *)&rx_pause)[j]);
5916 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5918 /* Init rate shaping and fairness contexts */
5922 /* During init there is no active link
5923 Until link is up, set link rate to 10Gbps */
5924 bp->link_vars.line_speed = SPEED_10000;
5925 bnx2x_init_port_minmax(bp);
5929 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5930 bnx2x_calc_vn_weight_sum(bp);
5932 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5933 bnx2x_init_vn_minmax(bp, 2*vn + port);
5935 /* Enable rate shaping and fairness */
5936 bp->cmng.flags.cmng_enables |=
5937 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5940 /* rate shaping and fairness are disabled */
5942 "single function mode minmax will be disabled\n");
5946 /* Store cmng structures to internal memory */
5948 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5949 REG_WR(bp, BAR_XSTRORM_INTMEM +
5950 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5951 ((u32 *)(&bp->cmng))[i]);
5954 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5956 switch (load_code) {
5957 case FW_MSG_CODE_DRV_LOAD_COMMON:
5958 bnx2x_init_internal_common(bp);
5961 case FW_MSG_CODE_DRV_LOAD_PORT:
5962 bnx2x_init_internal_port(bp);
5965 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5966 bnx2x_init_internal_func(bp);
5970 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5975 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5979 for_each_queue(bp, i) {
5980 struct bnx2x_fastpath *fp = &bp->fp[i];
5983 fp->state = BNX2X_FP_STATE_CLOSED;
5985 fp->cl_id = BP_L_ID(bp) + i;
5987 fp->sb_id = fp->cl_id + 1;
5989 fp->sb_id = fp->cl_id;
5992 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5993 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5994 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5996 bnx2x_update_fpsb_idx(fp);
5999 /* ensure status block indices were read */
6003 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6005 bnx2x_update_dsb_idx(bp);
6006 bnx2x_update_coalesce(bp);
6007 bnx2x_init_rx_rings(bp);
6008 bnx2x_init_tx_ring(bp);
6009 bnx2x_init_sp_ring(bp);
6010 bnx2x_init_context(bp);
6011 bnx2x_init_internal(bp, load_code);
6012 bnx2x_init_ind_table(bp);
6013 bnx2x_stats_init(bp);
6015 /* At this point, we are ready for interrupts */
6016 atomic_set(&bp->intr_sem, 0);
6018 /* flush all before enabling interrupts */
6022 bnx2x_int_enable(bp);
6024 /* Check for SPIO5 */
6025 bnx2x_attn_int_deasserted0(bp,
6026 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6027 AEU_INPUTS_ATTN_BITS_SPIO5);
6030 /* end of nic init */
6033 * gzip service functions
6036 static int bnx2x_gunzip_init(struct bnx2x *bp)
6038 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6039 &bp->gunzip_mapping, GFP_KERNEL);
6040 if (bp->gunzip_buf == NULL)
6043 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6044 if (bp->strm == NULL)
6047 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6049 if (bp->strm->workspace == NULL)
6059 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6060 bp->gunzip_mapping);
6061 bp->gunzip_buf = NULL;
6064 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6065 " un-compression\n");
6069 static void bnx2x_gunzip_end(struct bnx2x *bp)
6071 kfree(bp->strm->workspace);
6076 if (bp->gunzip_buf) {
6077 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6078 bp->gunzip_mapping);
6079 bp->gunzip_buf = NULL;
6083 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6087 /* check gzip header */
6088 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6089 BNX2X_ERR("Bad gzip header\n");
6097 if (zbuf[3] & FNAME)
6098 while ((zbuf[n++] != 0) && (n < len));
6100 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6101 bp->strm->avail_in = len - n;
6102 bp->strm->next_out = bp->gunzip_buf;
6103 bp->strm->avail_out = FW_BUF_SIZE;
6105 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6109 rc = zlib_inflate(bp->strm, Z_FINISH);
6110 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6111 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6114 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6115 if (bp->gunzip_outlen & 0x3)
6116 netdev_err(bp->dev, "Firmware decompression error:"
6117 " gunzip_outlen (%d) not aligned\n",
6119 bp->gunzip_outlen >>= 2;
6121 zlib_inflateEnd(bp->strm);
6123 if (rc == Z_STREAM_END)
6129 /* nic load/unload */
6132 * General service functions
6135 /* send a NIG loopback debug packet */
6136 static void bnx2x_lb_pckt(struct bnx2x *bp)
6140 /* Ethernet source and destination addresses */
6141 wb_write[0] = 0x55555555;
6142 wb_write[1] = 0x55555555;
6143 wb_write[2] = 0x20; /* SOP */
6144 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6146 /* NON-IP protocol */
6147 wb_write[0] = 0x09000000;
6148 wb_write[1] = 0x55555555;
6149 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6150 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6153 /* some of the internal memories
6154 * are not directly readable from the driver
6155 * to test them we send debug packets
6157 static int bnx2x_int_mem_test(struct bnx2x *bp)
6163 if (CHIP_REV_IS_FPGA(bp))
6165 else if (CHIP_REV_IS_EMUL(bp))
6170 DP(NETIF_MSG_HW, "start part1\n");
6172 /* Disable inputs of parser neighbor blocks */
6173 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6174 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6175 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6176 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6178 /* Write 0 to parser credits for CFC search request */
6179 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6181 /* send Ethernet packet */
6184 /* TODO do i reset NIG statistic? */
6185 /* Wait until NIG register shows 1 packet of size 0x10 */
6186 count = 1000 * factor;
6189 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6190 val = *bnx2x_sp(bp, wb_data[0]);
6198 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6202 /* Wait until PRS register shows 1 packet */
6203 count = 1000 * factor;
6205 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6213 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6217 /* Reset and init BRB, PRS */
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6222 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6223 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6225 DP(NETIF_MSG_HW, "part2\n");
6227 /* Disable inputs of parser neighbor blocks */
6228 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6229 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6230 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6231 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6233 /* Write 0 to parser credits for CFC search request */
6234 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6236 /* send 10 Ethernet packets */
6237 for (i = 0; i < 10; i++)
6240 /* Wait until NIG register shows 10 + 1
6241 packets of size 11*0x10 = 0xb0 */
6242 count = 1000 * factor;
6245 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6246 val = *bnx2x_sp(bp, wb_data[0]);
6254 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6258 /* Wait until PRS register shows 2 packets */
6259 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6261 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6263 /* Write 1 to parser credits for CFC search request */
6264 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6266 /* Wait until PRS register shows 3 packets */
6267 msleep(10 * factor);
6268 /* Wait until NIG register shows 1 packet of size 0x10 */
6269 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6271 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6273 /* clear NIG EOP FIFO */
6274 for (i = 0; i < 11; i++)
6275 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6276 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6278 BNX2X_ERR("clear of NIG failed\n");
6282 /* Reset and init BRB, PRS, NIG */
6283 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6287 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6288 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6291 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6294 /* Enable inputs of parser neighbor blocks */
6295 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6296 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6297 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6298 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6300 DP(NETIF_MSG_HW, "done\n");
6305 static void enable_blocks_attention(struct bnx2x *bp)
6307 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6308 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6309 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6310 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6311 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6312 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6313 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6314 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6315 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6316 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6317 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6318 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6319 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6320 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6321 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6322 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6323 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6324 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6325 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6326 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6327 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6328 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6329 if (CHIP_REV_IS_FPGA(bp))
6330 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6332 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6333 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6334 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6335 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6336 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6337 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6338 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6339 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6340 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6341 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6344 static const struct {
6347 } bnx2x_parity_mask[] = {
6348 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6349 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6350 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6351 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6352 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6353 {QM_REG_QM_PRTY_MASK, 0x0},
6354 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6355 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6356 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6357 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6358 {CDU_REG_CDU_PRTY_MASK, 0x0},
6359 {CFC_REG_CFC_PRTY_MASK, 0x0},
6360 {DBG_REG_DBG_PRTY_MASK, 0x0},
6361 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6362 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6363 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6364 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6365 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6366 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6367 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6369 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6370 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6371 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6372 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6373 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6374 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6375 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6378 static void enable_blocks_parity(struct bnx2x *bp)
6380 int i, mask_arr_len =
6381 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6383 for (i = 0; i < mask_arr_len; i++)
6384 REG_WR(bp, bnx2x_parity_mask[i].addr,
6385 bnx2x_parity_mask[i].mask);
6389 static void bnx2x_reset_common(struct bnx2x *bp)
6392 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6397 static void bnx2x_init_pxp(struct bnx2x *bp)
6400 int r_order, w_order;
6402 pci_read_config_word(bp->pdev,
6403 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6404 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6405 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6407 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6409 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6413 bnx2x_init_pxp_arb(bp, r_order, w_order);
6416 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6426 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6427 SHARED_HW_CFG_FAN_FAILURE_MASK;
6429 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6433 * The fan failure mechanism is usually related to the PHY type since
6434 * the power consumption of the board is affected by the PHY. Currently,
6435 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6437 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6438 for (port = PORT_0; port < PORT_MAX; port++) {
6440 SHMEM_RD(bp, dev_info.port_hw_config[port].
6441 external_phy_config) &
6442 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6445 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6447 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6449 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6452 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6454 if (is_required == 0)
6457 /* Fan failure is indicated by SPIO 5 */
6458 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6459 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6461 /* set to active low mode */
6462 val = REG_RD(bp, MISC_REG_SPIO_INT);
6463 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6464 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6465 REG_WR(bp, MISC_REG_SPIO_INT, val);
6467 /* enable interrupt to signal the IGU */
6468 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6469 val |= (1 << MISC_REGISTERS_SPIO_5);
6470 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6473 static int bnx2x_init_common(struct bnx2x *bp)
6480 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6482 bnx2x_reset_common(bp);
6483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6484 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6486 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6487 if (CHIP_IS_E1H(bp))
6488 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6490 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6492 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6494 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6495 if (CHIP_IS_E1(bp)) {
6496 /* enable HW interrupt from PXP on USDM overflow
6497 bit 16 on INT_MASK_0 */
6498 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6501 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6505 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6506 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6507 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6508 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6509 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6510 /* make sure this value is 0 */
6511 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6513 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6514 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6515 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6516 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6517 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6520 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6522 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6523 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6524 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6527 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6528 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6530 /* let the HW do it's magic ... */
6532 /* finish PXP init */
6533 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6535 BNX2X_ERR("PXP2 CFG failed\n");
6538 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6540 BNX2X_ERR("PXP2 RD_INIT failed\n");
6544 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6545 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6547 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6549 /* clean the DMAE memory */
6551 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6553 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6554 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6555 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6556 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6558 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6559 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6560 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6561 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6563 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6568 for (i = 0; i < 64; i++) {
6569 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6570 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6572 if (CHIP_IS_E1H(bp)) {
6573 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6574 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6579 /* soft reset pulse */
6580 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6581 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6584 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6587 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6588 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6589 if (!CHIP_REV_IS_SLOW(bp)) {
6590 /* enable hw interrupt from doorbell Q */
6591 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6594 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6595 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6596 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6599 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6601 if (CHIP_IS_E1H(bp))
6602 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6604 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6605 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6606 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6607 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6609 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6610 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6611 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6612 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6614 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6615 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6617 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6620 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6625 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6627 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6629 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6630 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6631 REG_WR(bp, i, 0xc0cac01a);
6632 /* TODO: replace with something meaningful */
6634 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6636 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6637 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6638 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6639 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6640 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6641 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6642 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6643 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6644 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6645 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6647 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6649 if (sizeof(union cdu_context) != 1024)
6650 /* we currently assume that a context is 1024 bytes */
6651 dev_alert(&bp->pdev->dev, "please adjust the size "
6652 "of cdu_context(%ld)\n",
6653 (long)sizeof(union cdu_context));
6655 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6656 val = (4 << 24) + (0 << 12) + 1024;
6657 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6659 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6660 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6661 /* enable context validation interrupt from CFC */
6662 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6664 /* set the thresholds to prevent CFC/CDU race */
6665 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6667 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6668 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6670 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6671 /* Reset PCIE errors for debug */
6672 REG_WR(bp, 0x2814, 0xffffffff);
6673 REG_WR(bp, 0x3820, 0xffffffff);
6675 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6676 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6677 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6678 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6680 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6681 if (CHIP_IS_E1H(bp)) {
6682 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6683 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6686 if (CHIP_REV_IS_SLOW(bp))
6689 /* finish CFC init */
6690 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6692 BNX2X_ERR("CFC LL_INIT failed\n");
6695 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6697 BNX2X_ERR("CFC AC_INIT failed\n");
6700 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6702 BNX2X_ERR("CFC CAM_INIT failed\n");
6705 REG_WR(bp, CFC_REG_DEBUG0, 0);
6707 /* read NIG statistic
6708 to see if this is our first up since powerup */
6709 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6710 val = *bnx2x_sp(bp, wb_data[0]);
6712 /* do internal memory self test */
6713 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6714 BNX2X_ERR("internal mem self test failed\n");
6718 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6723 bp->port.need_hw_lock = 1;
6730 bnx2x_setup_fan_failure_detection(bp);
6732 /* clear PXP2 attentions */
6733 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6735 enable_blocks_attention(bp);
6736 if (CHIP_PARITY_SUPPORTED(bp))
6737 enable_blocks_parity(bp);
6739 if (!BP_NOMCP(bp)) {
6740 bnx2x_acquire_phy_lock(bp);
6741 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6742 bnx2x_release_phy_lock(bp);
6744 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6749 static int bnx2x_init_port(struct bnx2x *bp)
6751 int port = BP_PORT(bp);
6752 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6756 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6758 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6760 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6761 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6763 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6764 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6765 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6766 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6769 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6771 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6772 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6773 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6776 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6778 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6779 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6780 /* no pause for emulation and FPGA */
6785 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6786 else if (bp->dev->mtu > 4096) {
6787 if (bp->flags & ONE_PORT_FLAG)
6791 /* (24*1024 + val*4)/256 */
6792 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6795 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6796 high = low + 56; /* 14*1024/256 */
6798 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6799 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6802 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6804 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6805 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6806 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6807 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6809 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6810 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6811 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6812 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6814 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6815 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6817 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6819 /* configure PBF to work without PAUSE mtu 9000 */
6820 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6822 /* update threshold */
6823 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6824 /* update init credit */
6825 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6828 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6830 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6833 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6835 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6836 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6838 if (CHIP_IS_E1(bp)) {
6839 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6840 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6842 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6844 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6845 /* init aeu_mask_attn_func_0/1:
6846 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6847 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6848 * bits 4-7 are used for "per vn group attention" */
6849 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6850 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6852 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6853 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6854 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6855 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6856 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6858 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6860 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6862 if (CHIP_IS_E1H(bp)) {
6863 /* 0x2 disable e1hov, 0x1 enable */
6864 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6865 (IS_E1HMF(bp) ? 0x1 : 0x2));
6868 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6869 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6870 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6874 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6875 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6880 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6882 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6883 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6885 /* The GPIO should be swapped if the swap register is
6887 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6888 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6890 /* Select function upon port-swap configuration */
6892 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6893 aeu_gpio_mask = (swap_val && swap_override) ?
6894 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6895 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6897 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6898 aeu_gpio_mask = (swap_val && swap_override) ?
6899 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6900 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6902 val = REG_RD(bp, offset);
6903 /* add GPIO3 to group */
6904 val |= aeu_gpio_mask;
6905 REG_WR(bp, offset, val);
6909 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6911 /* add SPIO 5 to group 0 */
6913 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6915 val = REG_RD(bp, reg_addr);
6916 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6917 REG_WR(bp, reg_addr, val);
6925 bnx2x__link_reset(bp);
6930 #define ILT_PER_FUNC (768/2)
6931 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6932 /* the phys address is shifted right 12 bits and has an added
6933 1=valid bit added to the 53rd bit
6934 then since this is a wide register(TM)
6935 we split it into two 32 bit writes
6937 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6938 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6939 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6940 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6943 #define CNIC_ILT_LINES 127
6944 #define CNIC_CTX_PER_ILT 16
6946 #define CNIC_ILT_LINES 0
6949 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6953 if (CHIP_IS_E1H(bp))
6954 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6956 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6958 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6961 static int bnx2x_init_func(struct bnx2x *bp)
6963 int port = BP_PORT(bp);
6964 int func = BP_FUNC(bp);
6968 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6970 /* set MSI reconfigure capability */
6971 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6972 val = REG_RD(bp, addr);
6973 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6974 REG_WR(bp, addr, val);
6976 i = FUNC_ILT_BASE(func);
6978 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6979 if (CHIP_IS_E1H(bp)) {
6980 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6981 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6983 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6984 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6987 i += 1 + CNIC_ILT_LINES;
6988 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6990 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6992 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6993 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6997 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6999 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7001 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7002 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7006 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7008 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7010 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7011 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7014 /* tell the searcher where the T2 table is */
7015 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7017 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7018 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7020 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7021 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7022 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7024 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7027 if (CHIP_IS_E1H(bp)) {
7028 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7029 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7030 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7031 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7032 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7033 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7034 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7035 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7036 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7038 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7039 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7042 /* HC init per function */
7043 if (CHIP_IS_E1H(bp)) {
7044 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7046 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7047 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7049 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7051 /* Reset PCIE errors for debug */
7052 REG_WR(bp, 0x2114, 0xffffffff);
7053 REG_WR(bp, 0x2120, 0xffffffff);
7058 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7062 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7063 BP_FUNC(bp), load_code);
7066 mutex_init(&bp->dmae_mutex);
7067 rc = bnx2x_gunzip_init(bp);
7071 switch (load_code) {
7072 case FW_MSG_CODE_DRV_LOAD_COMMON:
7073 rc = bnx2x_init_common(bp);
7078 case FW_MSG_CODE_DRV_LOAD_PORT:
7080 rc = bnx2x_init_port(bp);
7085 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7087 rc = bnx2x_init_func(bp);
7093 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7097 if (!BP_NOMCP(bp)) {
7098 int func = BP_FUNC(bp);
7100 bp->fw_drv_pulse_wr_seq =
7101 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7102 DRV_PULSE_SEQ_MASK);
7103 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7106 /* this needs to be done before gunzip end */
7107 bnx2x_zero_def_sb(bp);
7108 for_each_queue(bp, i)
7109 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7111 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7115 bnx2x_gunzip_end(bp);
7120 static void bnx2x_free_mem(struct bnx2x *bp)
7123 #define BNX2X_PCI_FREE(x, y, size) \
7126 dma_free_coherent(&bp->pdev->dev, size, x, y); \
7132 #define BNX2X_FREE(x) \
7144 for_each_queue(bp, i) {
7147 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7148 bnx2x_fp(bp, i, status_blk_mapping),
7149 sizeof(struct host_status_block));
7152 for_each_queue(bp, i) {
7154 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7155 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7156 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7157 bnx2x_fp(bp, i, rx_desc_mapping),
7158 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7160 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7161 bnx2x_fp(bp, i, rx_comp_mapping),
7162 sizeof(struct eth_fast_path_rx_cqe) *
7166 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7167 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7168 bnx2x_fp(bp, i, rx_sge_mapping),
7169 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7172 for_each_queue(bp, i) {
7174 /* fastpath tx rings: tx_buf tx_desc */
7175 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7176 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7177 bnx2x_fp(bp, i, tx_desc_mapping),
7178 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7180 /* end of fastpath */
7182 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7183 sizeof(struct host_def_status_block));
7185 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7186 sizeof(struct bnx2x_slowpath));
7189 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7190 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7191 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7192 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7193 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7194 sizeof(struct host_status_block));
7196 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7198 #undef BNX2X_PCI_FREE
7202 static int bnx2x_alloc_mem(struct bnx2x *bp)
7205 #define BNX2X_PCI_ALLOC(x, y, size) \
7207 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7209 goto alloc_mem_err; \
7210 memset(x, 0, size); \
7213 #define BNX2X_ALLOC(x, size) \
7215 x = vmalloc(size); \
7217 goto alloc_mem_err; \
7218 memset(x, 0, size); \
7225 for_each_queue(bp, i) {
7226 bnx2x_fp(bp, i, bp) = bp;
7229 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7230 &bnx2x_fp(bp, i, status_blk_mapping),
7231 sizeof(struct host_status_block));
7234 for_each_queue(bp, i) {
7236 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7237 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7238 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7239 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7240 &bnx2x_fp(bp, i, rx_desc_mapping),
7241 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7243 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7244 &bnx2x_fp(bp, i, rx_comp_mapping),
7245 sizeof(struct eth_fast_path_rx_cqe) *
7249 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7250 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7251 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7252 &bnx2x_fp(bp, i, rx_sge_mapping),
7253 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7256 for_each_queue(bp, i) {
7258 /* fastpath tx rings: tx_buf tx_desc */
7259 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7260 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7261 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7262 &bnx2x_fp(bp, i, tx_desc_mapping),
7263 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7265 /* end of fastpath */
7267 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7268 sizeof(struct host_def_status_block));
7270 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7271 sizeof(struct bnx2x_slowpath));
7274 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7276 /* allocate searcher T2 table
7277 we allocate 1/4 of alloc num for T2
7278 (which is not entered into the ILT) */
7279 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7281 /* Initialize T2 (for 1024 connections) */
7282 for (i = 0; i < 16*1024; i += 64)
7283 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7285 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7286 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7288 /* QM queues (128*MAX_CONN) */
7289 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7291 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7292 sizeof(struct host_status_block));
7295 /* Slow path ring */
7296 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7304 #undef BNX2X_PCI_ALLOC
7308 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7312 for_each_queue(bp, i) {
7313 struct bnx2x_fastpath *fp = &bp->fp[i];
7315 u16 bd_cons = fp->tx_bd_cons;
7316 u16 sw_prod = fp->tx_pkt_prod;
7317 u16 sw_cons = fp->tx_pkt_cons;
7319 while (sw_cons != sw_prod) {
7320 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7326 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7330 for_each_queue(bp, j) {
7331 struct bnx2x_fastpath *fp = &bp->fp[j];
7333 for (i = 0; i < NUM_RX_BD; i++) {
7334 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7335 struct sk_buff *skb = rx_buf->skb;
7340 dma_unmap_single(&bp->pdev->dev,
7341 dma_unmap_addr(rx_buf, mapping),
7342 bp->rx_buf_size, DMA_FROM_DEVICE);
7347 if (!fp->disable_tpa)
7348 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7349 ETH_MAX_AGGREGATION_QUEUES_E1 :
7350 ETH_MAX_AGGREGATION_QUEUES_E1H);
7354 static void bnx2x_free_skbs(struct bnx2x *bp)
7356 bnx2x_free_tx_skbs(bp);
7357 bnx2x_free_rx_skbs(bp);
7360 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7364 free_irq(bp->msix_table[0].vector, bp->dev);
7365 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7366 bp->msix_table[0].vector);
7371 for_each_queue(bp, i) {
7372 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7373 "state %x\n", i, bp->msix_table[i + offset].vector,
7374 bnx2x_fp(bp, i, state));
7376 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7380 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7382 if (bp->flags & USING_MSIX_FLAG) {
7384 bnx2x_free_msix_irqs(bp);
7385 pci_disable_msix(bp->pdev);
7386 bp->flags &= ~USING_MSIX_FLAG;
7388 } else if (bp->flags & USING_MSI_FLAG) {
7390 free_irq(bp->pdev->irq, bp->dev);
7391 pci_disable_msi(bp->pdev);
7392 bp->flags &= ~USING_MSI_FLAG;
7394 } else if (!disable_only)
7395 free_irq(bp->pdev->irq, bp->dev);
7398 static int bnx2x_enable_msix(struct bnx2x *bp)
7400 int i, rc, offset = 1;
7403 bp->msix_table[0].entry = igu_vec;
7404 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7407 igu_vec = BP_L_ID(bp) + offset;
7408 bp->msix_table[1].entry = igu_vec;
7409 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7412 for_each_queue(bp, i) {
7413 igu_vec = BP_L_ID(bp) + offset + i;
7414 bp->msix_table[i + offset].entry = igu_vec;
7415 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7416 "(fastpath #%u)\n", i + offset, igu_vec, i);
7419 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7420 BNX2X_NUM_QUEUES(bp) + offset);
7423 * reconfigure number of tx/rx queues according to available
7426 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7427 /* vectors available for FP */
7428 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7431 "Trying to use less MSI-X vectors: %d\n", rc);
7433 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7437 "MSI-X is not attainable rc %d\n", rc);
7441 bp->num_queues = min(bp->num_queues, fp_vec);
7443 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7446 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7450 bp->flags |= USING_MSIX_FLAG;
7455 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7457 int i, rc, offset = 1;
7459 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7460 bp->dev->name, bp->dev);
7462 BNX2X_ERR("request sp irq failed\n");
7469 for_each_queue(bp, i) {
7470 struct bnx2x_fastpath *fp = &bp->fp[i];
7471 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7474 rc = request_irq(bp->msix_table[i + offset].vector,
7475 bnx2x_msix_fp_int, 0, fp->name, fp);
7477 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7478 bnx2x_free_msix_irqs(bp);
7482 fp->state = BNX2X_FP_STATE_IRQ;
7485 i = BNX2X_NUM_QUEUES(bp);
7486 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7488 bp->msix_table[0].vector,
7489 0, bp->msix_table[offset].vector,
7490 i - 1, bp->msix_table[offset + i - 1].vector);
7495 static int bnx2x_enable_msi(struct bnx2x *bp)
7499 rc = pci_enable_msi(bp->pdev);
7501 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7504 bp->flags |= USING_MSI_FLAG;
7509 static int bnx2x_req_irq(struct bnx2x *bp)
7511 unsigned long flags;
7514 if (bp->flags & USING_MSI_FLAG)
7517 flags = IRQF_SHARED;
7519 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7520 bp->dev->name, bp->dev);
7522 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7527 static void bnx2x_napi_enable(struct bnx2x *bp)
7531 for_each_queue(bp, i)
7532 napi_enable(&bnx2x_fp(bp, i, napi));
7535 static void bnx2x_napi_disable(struct bnx2x *bp)
7539 for_each_queue(bp, i)
7540 napi_disable(&bnx2x_fp(bp, i, napi));
7543 static void bnx2x_netif_start(struct bnx2x *bp)
7547 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7548 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7551 if (netif_running(bp->dev)) {
7552 bnx2x_napi_enable(bp);
7553 bnx2x_int_enable(bp);
7554 if (bp->state == BNX2X_STATE_OPEN)
7555 netif_tx_wake_all_queues(bp->dev);
7560 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7562 bnx2x_int_disable_sync(bp, disable_hw);
7563 bnx2x_napi_disable(bp);
7564 netif_tx_disable(bp->dev);
7568 * Init service functions
7572 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7574 * @param bp driver descriptor
7575 * @param set set or clear an entry (1 or 0)
7576 * @param mac pointer to a buffer containing a MAC
7577 * @param cl_bit_vec bit vector of clients to register a MAC for
7578 * @param cam_offset offset in a CAM to use
7579 * @param with_bcast set broadcast MAC as well
7581 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7582 u32 cl_bit_vec, u8 cam_offset,
7585 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7586 int port = BP_PORT(bp);
7589 * unicasts 0-31:port0 32-63:port1
7590 * multicast 64-127:port0 128-191:port1
7592 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7593 config->hdr.offset = cam_offset;
7594 config->hdr.client_id = 0xff;
7595 config->hdr.reserved1 = 0;
7598 config->config_table[0].cam_entry.msb_mac_addr =
7599 swab16(*(u16 *)&mac[0]);
7600 config->config_table[0].cam_entry.middle_mac_addr =
7601 swab16(*(u16 *)&mac[2]);
7602 config->config_table[0].cam_entry.lsb_mac_addr =
7603 swab16(*(u16 *)&mac[4]);
7604 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7606 config->config_table[0].target_table_entry.flags = 0;
7608 CAM_INVALIDATE(config->config_table[0]);
7609 config->config_table[0].target_table_entry.clients_bit_vector =
7610 cpu_to_le32(cl_bit_vec);
7611 config->config_table[0].target_table_entry.vlan_id = 0;
7613 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7614 (set ? "setting" : "clearing"),
7615 config->config_table[0].cam_entry.msb_mac_addr,
7616 config->config_table[0].cam_entry.middle_mac_addr,
7617 config->config_table[0].cam_entry.lsb_mac_addr);
7621 config->config_table[1].cam_entry.msb_mac_addr =
7622 cpu_to_le16(0xffff);
7623 config->config_table[1].cam_entry.middle_mac_addr =
7624 cpu_to_le16(0xffff);
7625 config->config_table[1].cam_entry.lsb_mac_addr =
7626 cpu_to_le16(0xffff);
7627 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7629 config->config_table[1].target_table_entry.flags =
7630 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7632 CAM_INVALIDATE(config->config_table[1]);
7633 config->config_table[1].target_table_entry.clients_bit_vector =
7634 cpu_to_le32(cl_bit_vec);
7635 config->config_table[1].target_table_entry.vlan_id = 0;
7638 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7639 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7640 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7644 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7646 * @param bp driver descriptor
7647 * @param set set or clear an entry (1 or 0)
7648 * @param mac pointer to a buffer containing a MAC
7649 * @param cl_bit_vec bit vector of clients to register a MAC for
7650 * @param cam_offset offset in a CAM to use
7652 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7653 u32 cl_bit_vec, u8 cam_offset)
7655 struct mac_configuration_cmd_e1h *config =
7656 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7658 config->hdr.length = 1;
7659 config->hdr.offset = cam_offset;
7660 config->hdr.client_id = 0xff;
7661 config->hdr.reserved1 = 0;
7664 config->config_table[0].msb_mac_addr =
7665 swab16(*(u16 *)&mac[0]);
7666 config->config_table[0].middle_mac_addr =
7667 swab16(*(u16 *)&mac[2]);
7668 config->config_table[0].lsb_mac_addr =
7669 swab16(*(u16 *)&mac[4]);
7670 config->config_table[0].clients_bit_vector =
7671 cpu_to_le32(cl_bit_vec);
7672 config->config_table[0].vlan_id = 0;
7673 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7675 config->config_table[0].flags = BP_PORT(bp);
7677 config->config_table[0].flags =
7678 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7680 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7681 (set ? "setting" : "clearing"),
7682 config->config_table[0].msb_mac_addr,
7683 config->config_table[0].middle_mac_addr,
7684 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7686 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7687 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7688 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7691 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7692 int *state_p, int poll)
7694 /* can take a while if any port is running */
7697 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7698 poll ? "polling" : "waiting", state, idx);
7703 bnx2x_rx_int(bp->fp, 10);
7704 /* if index is different from 0
7705 * the reply for some commands will
7706 * be on the non default queue
7709 bnx2x_rx_int(&bp->fp[idx], 10);
7712 mb(); /* state is changed by bnx2x_sp_event() */
7713 if (*state_p == state) {
7714 #ifdef BNX2X_STOP_ON_ERROR
7715 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7727 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7728 poll ? "polling" : "waiting", state, idx);
7729 #ifdef BNX2X_STOP_ON_ERROR
7736 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7738 bp->set_mac_pending++;
7741 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7742 (1 << bp->fp->cl_id), BP_FUNC(bp));
7744 /* Wait for a completion */
7745 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7748 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7750 bp->set_mac_pending++;
7753 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7754 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7757 /* Wait for a completion */
7758 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7763 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7764 * MAC(s). This function will wait until the ramdord completion
7767 * @param bp driver handle
7768 * @param set set or clear the CAM entry
7770 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7772 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7774 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7776 bp->set_mac_pending++;
7779 /* Send a SET_MAC ramrod */
7781 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7782 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7785 /* CAM allocation for E1H
7786 * unicasts: by func number
7787 * multicast: 20+FUNC*20, 20 each
7789 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7790 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7792 /* Wait for a completion when setting */
7793 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7799 static int bnx2x_setup_leading(struct bnx2x *bp)
7803 /* reset IGU state */
7804 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7807 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7809 /* Wait for completion */
7810 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7815 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7817 struct bnx2x_fastpath *fp = &bp->fp[index];
7819 /* reset IGU state */
7820 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7823 fp->state = BNX2X_FP_STATE_OPENING;
7824 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7827 /* Wait for completion */
7828 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7832 static int bnx2x_poll(struct napi_struct *napi, int budget);
7834 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7837 switch (bp->multi_mode) {
7838 case ETH_RSS_MODE_DISABLED:
7842 case ETH_RSS_MODE_REGULAR:
7844 bp->num_queues = min_t(u32, num_queues,
7845 BNX2X_MAX_QUEUES(bp));
7847 bp->num_queues = min_t(u32, num_online_cpus(),
7848 BNX2X_MAX_QUEUES(bp));
7858 static int bnx2x_set_num_queues(struct bnx2x *bp)
7866 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7869 /* Set number of queues according to bp->multi_mode value */
7870 bnx2x_set_num_queues_msix(bp);
7872 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7875 /* if we can't use MSI-X we only need one fp,
7876 * so try to enable MSI-X with the requested number of fp's
7877 * and fallback to MSI or legacy INTx with one fp
7879 rc = bnx2x_enable_msix(bp);
7881 /* failed to enable MSI-X */
7885 bp->dev->real_num_tx_queues = bp->num_queues;
7890 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7891 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7894 /* must be called with rtnl_lock */
7895 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7900 #ifdef BNX2X_STOP_ON_ERROR
7901 if (unlikely(bp->panic))
7905 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7907 rc = bnx2x_set_num_queues(bp);
7909 if (bnx2x_alloc_mem(bp)) {
7910 bnx2x_free_irq(bp, true);
7914 for_each_queue(bp, i)
7915 bnx2x_fp(bp, i, disable_tpa) =
7916 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7918 for_each_queue(bp, i)
7919 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7922 bnx2x_napi_enable(bp);
7924 if (bp->flags & USING_MSIX_FLAG) {
7925 rc = bnx2x_req_msix_irqs(bp);
7927 bnx2x_free_irq(bp, true);
7931 /* Fall to INTx if failed to enable MSI-X due to lack of
7932 memory (in bnx2x_set_num_queues()) */
7933 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7934 bnx2x_enable_msi(bp);
7936 rc = bnx2x_req_irq(bp);
7938 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7939 bnx2x_free_irq(bp, true);
7942 if (bp->flags & USING_MSI_FLAG) {
7943 bp->dev->irq = bp->pdev->irq;
7944 netdev_info(bp->dev, "using MSI IRQ %d\n",
7949 /* Send LOAD_REQUEST command to MCP
7950 Returns the type of LOAD command:
7951 if it is the first port to be initialized
7952 common blocks should be initialized, otherwise - not
7954 if (!BP_NOMCP(bp)) {
7955 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7957 BNX2X_ERR("MCP response failure, aborting\n");
7961 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7962 rc = -EBUSY; /* other port in diagnostic mode */
7967 int port = BP_PORT(bp);
7969 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7970 load_count[0], load_count[1], load_count[2]);
7972 load_count[1 + port]++;
7973 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7974 load_count[0], load_count[1], load_count[2]);
7975 if (load_count[0] == 1)
7976 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7977 else if (load_count[1 + port] == 1)
7978 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7980 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7983 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7984 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7988 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7991 rc = bnx2x_init_hw(bp, load_code);
7993 BNX2X_ERR("HW init failed, aborting\n");
7994 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7995 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7996 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8000 /* Setup NIC internals and enable interrupts */
8001 bnx2x_nic_init(bp, load_code);
8003 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8004 (bp->common.shmem2_base))
8005 SHMEM2_WR(bp, dcc_support,
8006 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8007 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8009 /* Send LOAD_DONE command to MCP */
8010 if (!BP_NOMCP(bp)) {
8011 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8013 BNX2X_ERR("MCP response failure, aborting\n");
8019 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8021 rc = bnx2x_setup_leading(bp);
8023 BNX2X_ERR("Setup leading failed!\n");
8024 #ifndef BNX2X_STOP_ON_ERROR
8032 if (CHIP_IS_E1H(bp))
8033 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8034 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8035 bp->flags |= MF_FUNC_DIS;
8038 if (bp->state == BNX2X_STATE_OPEN) {
8040 /* Enable Timer scan */
8041 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8043 for_each_nondefault_queue(bp, i) {
8044 rc = bnx2x_setup_multi(bp, i);
8054 bnx2x_set_eth_mac_addr_e1(bp, 1);
8056 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8058 /* Set iSCSI L2 MAC */
8059 mutex_lock(&bp->cnic_mutex);
8060 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8061 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8062 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8063 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8066 mutex_unlock(&bp->cnic_mutex);
8071 bnx2x_initial_phy_init(bp, load_mode);
8073 /* Start fast path */
8074 switch (load_mode) {
8076 if (bp->state == BNX2X_STATE_OPEN) {
8077 /* Tx queue should be only reenabled */
8078 netif_tx_wake_all_queues(bp->dev);
8080 /* Initialize the receive filter. */
8081 bnx2x_set_rx_mode(bp->dev);
8085 netif_tx_start_all_queues(bp->dev);
8086 if (bp->state != BNX2X_STATE_OPEN)
8087 netif_tx_disable(bp->dev);
8088 /* Initialize the receive filter. */
8089 bnx2x_set_rx_mode(bp->dev);
8093 /* Initialize the receive filter. */
8094 bnx2x_set_rx_mode(bp->dev);
8095 bp->state = BNX2X_STATE_DIAG;
8103 bnx2x__link_status_update(bp);
8105 /* start the timer */
8106 mod_timer(&bp->timer, jiffies + bp->current_interval);
8109 bnx2x_setup_cnic_irq_info(bp);
8110 if (bp->state == BNX2X_STATE_OPEN)
8111 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8113 bnx2x_inc_load_cnt(bp);
8119 /* Disable Timer scan */
8120 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8123 bnx2x_int_disable_sync(bp, 1);
8124 if (!BP_NOMCP(bp)) {
8125 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8126 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8129 /* Free SKBs, SGEs, TPA pool and driver internals */
8130 bnx2x_free_skbs(bp);
8131 for_each_queue(bp, i)
8132 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8135 bnx2x_free_irq(bp, false);
8137 bnx2x_napi_disable(bp);
8138 for_each_queue(bp, i)
8139 netif_napi_del(&bnx2x_fp(bp, i, napi));
8145 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8147 struct bnx2x_fastpath *fp = &bp->fp[index];
8150 /* halt the connection */
8151 fp->state = BNX2X_FP_STATE_HALTING;
8152 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8154 /* Wait for completion */
8155 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8157 if (rc) /* timeout */
8160 /* delete cfc entry */
8161 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8163 /* Wait for completion */
8164 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8169 static int bnx2x_stop_leading(struct bnx2x *bp)
8171 __le16 dsb_sp_prod_idx;
8172 /* if the other port is handling traffic,
8173 this can take a lot of time */
8179 /* Send HALT ramrod */
8180 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8181 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8183 /* Wait for completion */
8184 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8185 &(bp->fp[0].state), 1);
8186 if (rc) /* timeout */
8189 dsb_sp_prod_idx = *bp->dsb_sp_prod;
8191 /* Send PORT_DELETE ramrod */
8192 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8194 /* Wait for completion to arrive on default status block
8195 we are going to reset the chip anyway
8196 so there is not much to do if this times out
8198 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8200 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8201 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8202 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8203 #ifdef BNX2X_STOP_ON_ERROR
8211 rmb(); /* Refresh the dsb_sp_prod */
8213 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8214 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8219 static void bnx2x_reset_func(struct bnx2x *bp)
8221 int port = BP_PORT(bp);
8222 int func = BP_FUNC(bp);
8226 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8227 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8230 /* Disable Timer scan */
8231 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8233 * Wait for at least 10ms and up to 2 second for the timers scan to
8236 for (i = 0; i < 200; i++) {
8238 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8243 base = FUNC_ILT_BASE(func);
8244 for (i = base; i < base + ILT_PER_FUNC; i++)
8245 bnx2x_ilt_wr(bp, i, 0);
8248 static void bnx2x_reset_port(struct bnx2x *bp)
8250 int port = BP_PORT(bp);
8253 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8255 /* Do not rcv packets to BRB */
8256 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8257 /* Do not direct rcv packets that are not for MCP to the BRB */
8258 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8259 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8262 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8265 /* Check for BRB port occupancy */
8266 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8268 DP(NETIF_MSG_IFDOWN,
8269 "BRB1 is not empty %d blocks are occupied\n", val);
8271 /* TODO: Close Doorbell port? */
8274 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8276 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8277 BP_FUNC(bp), reset_code);
8279 switch (reset_code) {
8280 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8281 bnx2x_reset_port(bp);
8282 bnx2x_reset_func(bp);
8283 bnx2x_reset_common(bp);
8286 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8287 bnx2x_reset_port(bp);
8288 bnx2x_reset_func(bp);
8291 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8292 bnx2x_reset_func(bp);
8296 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8301 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8303 int port = BP_PORT(bp);
8307 /* Wait until tx fastpath tasks complete */
8308 for_each_queue(bp, i) {
8309 struct bnx2x_fastpath *fp = &bp->fp[i];
8312 while (bnx2x_has_tx_work_unload(fp)) {
8316 BNX2X_ERR("timeout waiting for queue[%d]\n",
8318 #ifdef BNX2X_STOP_ON_ERROR
8329 /* Give HW time to discard old tx messages */
8332 if (CHIP_IS_E1(bp)) {
8333 struct mac_configuration_cmd *config =
8334 bnx2x_sp(bp, mcast_config);
8336 bnx2x_set_eth_mac_addr_e1(bp, 0);
8338 for (i = 0; i < config->hdr.length; i++)
8339 CAM_INVALIDATE(config->config_table[i]);
8341 config->hdr.length = i;
8342 if (CHIP_REV_IS_SLOW(bp))
8343 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8345 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8346 config->hdr.client_id = bp->fp->cl_id;
8347 config->hdr.reserved1 = 0;
8349 bp->set_mac_pending++;
8352 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8353 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8354 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8357 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8359 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8361 for (i = 0; i < MC_HASH_SIZE; i++)
8362 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8364 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8367 /* Clear iSCSI L2 MAC */
8368 mutex_lock(&bp->cnic_mutex);
8369 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8370 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8371 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8373 mutex_unlock(&bp->cnic_mutex);
8376 if (unload_mode == UNLOAD_NORMAL)
8377 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8379 else if (bp->flags & NO_WOL_FLAG)
8380 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8383 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8384 u8 *mac_addr = bp->dev->dev_addr;
8386 /* The mac address is written to entries 1-4 to
8387 preserve entry 0 which is used by the PMF */
8388 u8 entry = (BP_E1HVN(bp) + 1)*8;
8390 val = (mac_addr[0] << 8) | mac_addr[1];
8391 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8393 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8394 (mac_addr[4] << 8) | mac_addr[5];
8395 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8397 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8400 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8402 /* Close multi and leading connections
8403 Completions for ramrods are collected in a synchronous way */
8404 for_each_nondefault_queue(bp, i)
8405 if (bnx2x_stop_multi(bp, i))
8408 rc = bnx2x_stop_leading(bp);
8410 BNX2X_ERR("Stop leading failed!\n");
8411 #ifdef BNX2X_STOP_ON_ERROR
8420 reset_code = bnx2x_fw_command(bp, reset_code);
8422 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8423 load_count[0], load_count[1], load_count[2]);
8425 load_count[1 + port]--;
8426 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8427 load_count[0], load_count[1], load_count[2]);
8428 if (load_count[0] == 0)
8429 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8430 else if (load_count[1 + port] == 0)
8431 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8433 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8436 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8437 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8438 bnx2x__link_reset(bp);
8440 /* Reset the chip */
8441 bnx2x_reset_chip(bp, reset_code);
8443 /* Report UNLOAD_DONE to MCP */
8445 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8449 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8453 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8455 if (CHIP_IS_E1(bp)) {
8456 int port = BP_PORT(bp);
8457 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8458 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8460 val = REG_RD(bp, addr);
8462 REG_WR(bp, addr, val);
8463 } else if (CHIP_IS_E1H(bp)) {
8464 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8465 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8466 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8467 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8471 /* must be called with rtnl_lock */
8472 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8476 if (bp->state == BNX2X_STATE_CLOSED) {
8477 /* Interface has been removed - nothing to recover */
8478 bp->recovery_state = BNX2X_RECOVERY_DONE;
8480 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8487 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8489 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8491 /* Set "drop all" */
8492 bp->rx_mode = BNX2X_RX_MODE_NONE;
8493 bnx2x_set_storm_rx_mode(bp);
8495 /* Disable HW interrupts, NAPI and Tx */
8496 bnx2x_netif_stop(bp, 1);
8498 del_timer_sync(&bp->timer);
8499 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8500 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8501 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8504 bnx2x_free_irq(bp, false);
8506 /* Cleanup the chip if needed */
8507 if (unload_mode != UNLOAD_RECOVERY)
8508 bnx2x_chip_cleanup(bp, unload_mode);
8512 /* Free SKBs, SGEs, TPA pool and driver internals */
8513 bnx2x_free_skbs(bp);
8514 for_each_queue(bp, i)
8515 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8516 for_each_queue(bp, i)
8517 netif_napi_del(&bnx2x_fp(bp, i, napi));
8520 bp->state = BNX2X_STATE_CLOSED;
8522 netif_carrier_off(bp->dev);
8524 /* The last driver must disable a "close the gate" if there is no
8525 * parity attention or "process kill" pending.
8527 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8528 bnx2x_reset_is_done(bp))
8529 bnx2x_disable_close_the_gate(bp);
8531 /* Reset MCP mail box sequence if there is on going recovery */
8532 if (unload_mode == UNLOAD_RECOVERY)
8538 /* Close gates #2, #3 and #4: */
8539 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8543 /* Gates #2 and #4a are closed/opened for "not E1" only */
8544 if (!CHIP_IS_E1(bp)) {
8546 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8547 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8548 close ? (val | 0x1) : (val & (~(u32)1)));
8550 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8551 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8552 close ? (val | 0x1) : (val & (~(u32)1)));
8556 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8557 val = REG_RD(bp, addr);
8558 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8560 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8561 close ? "closing" : "opening");
8565 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8567 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8569 /* Do some magic... */
8570 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8571 *magic_val = val & SHARED_MF_CLP_MAGIC;
8572 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8575 /* Restore the value of the `magic' bit.
8577 * @param pdev Device handle.
8578 * @param magic_val Old value of the `magic' bit.
8580 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8582 /* Restore the `magic' bit value... */
8583 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8584 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8585 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8586 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8587 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8588 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8591 /* Prepares for MCP reset: takes care of CLP configurations.
8594 * @param magic_val Old value of 'magic' bit.
8596 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8599 u32 validity_offset;
8601 DP(NETIF_MSG_HW, "Starting\n");
8603 /* Set `magic' bit in order to save MF config */
8604 if (!CHIP_IS_E1(bp))
8605 bnx2x_clp_reset_prep(bp, magic_val);
8607 /* Get shmem offset */
8608 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8609 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8611 /* Clear validity map flags */
8613 REG_WR(bp, shmem + validity_offset, 0);
8616 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8617 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
8619 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8620 * depending on the HW type.
8624 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8626 /* special handling for emulation and FPGA,
8627 wait 10 times longer */
8628 if (CHIP_REV_IS_SLOW(bp))
8629 msleep(MCP_ONE_TIMEOUT*10);
8631 msleep(MCP_ONE_TIMEOUT);
8634 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8636 u32 shmem, cnt, validity_offset, val;
8641 /* Get shmem offset */
8642 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8644 BNX2X_ERR("Shmem 0 return failure\n");
8649 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8651 /* Wait for MCP to come up */
8652 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8653 /* TBD: its best to check validity map of last port.
8654 * currently checks on port 0.
8656 val = REG_RD(bp, shmem + validity_offset);
8657 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8658 shmem + validity_offset, val);
8660 /* check that shared memory is valid. */
8661 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8662 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8665 bnx2x_mcp_wait_one(bp);
8668 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8670 /* Check that shared memory is valid. This indicates that MCP is up. */
8671 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8672 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8673 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8679 /* Restore the `magic' bit value */
8680 if (!CHIP_IS_E1(bp))
8681 bnx2x_clp_reset_done(bp, magic_val);
8686 static void bnx2x_pxp_prep(struct bnx2x *bp)
8688 if (!CHIP_IS_E1(bp)) {
8689 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8690 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8691 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8697 * Reset the whole chip except for:
8699 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8702 * - MISC (including AEU)
8706 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8708 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8711 MISC_REGISTERS_RESET_REG_1_RST_HC |
8712 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8713 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8716 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8717 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8718 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8719 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8720 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8721 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8722 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8723 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8725 reset_mask1 = 0xffffffff;
8728 reset_mask2 = 0xffff;
8730 reset_mask2 = 0x1ffff;
8732 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8733 reset_mask1 & (~not_reset_mask1));
8734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8735 reset_mask2 & (~not_reset_mask2));
8740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8741 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8745 static int bnx2x_process_kill(struct bnx2x *bp)
8749 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8752 /* Empty the Tetris buffer, wait for 1s */
8754 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8755 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8756 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8757 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8758 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8759 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8760 ((port_is_idle_0 & 0x1) == 0x1) &&
8761 ((port_is_idle_1 & 0x1) == 0x1) &&
8762 (pgl_exp_rom2 == 0xffffffff))
8765 } while (cnt-- > 0);
8768 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8770 " outstanding read requests after 1s!\n");
8771 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8772 " port_is_idle_0=0x%08x,"
8773 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8774 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8781 /* Close gates #2, #3 and #4 */
8782 bnx2x_set_234_gates(bp, true);
8784 /* TBD: Indicate that "process kill" is in progress to MCP */
8786 /* Clear "unprepared" bit */
8787 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8790 /* Make sure all is written to the chip before the reset */
8793 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8794 * PSWHST, GRC and PSWRD Tetris buffer.
8798 /* Prepare to chip reset: */
8800 bnx2x_reset_mcp_prep(bp, &val);
8806 /* reset the chip */
8807 bnx2x_process_kill_chip_reset(bp);
8810 /* Recover after reset: */
8812 if (bnx2x_reset_mcp_comp(bp, val))
8818 /* Open the gates #2, #3 and #4 */
8819 bnx2x_set_234_gates(bp, false);
8821 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8822 * reset state, re-enable attentions. */
8827 static int bnx2x_leader_reset(struct bnx2x *bp)
8830 /* Try to recover after the failure */
8831 if (bnx2x_process_kill(bp)) {
8832 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8835 goto exit_leader_reset;
8838 /* Clear "reset is in progress" bit and update the driver state */
8839 bnx2x_set_reset_done(bp);
8840 bp->recovery_state = BNX2X_RECOVERY_DONE;
8844 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8849 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8851 /* Assumption: runs under rtnl lock. This together with the fact
8852 * that it's called only from bnx2x_reset_task() ensure that it
8853 * will never be called when netif_running(bp->dev) is false.
8855 static void bnx2x_parity_recover(struct bnx2x *bp)
8857 DP(NETIF_MSG_HW, "Handling parity\n");
8859 switch (bp->recovery_state) {
8860 case BNX2X_RECOVERY_INIT:
8861 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8862 /* Try to get a LEADER_LOCK HW lock */
8863 if (bnx2x_trylock_hw_lock(bp,
8864 HW_LOCK_RESOURCE_RESERVED_08))
8867 /* Stop the driver */
8868 /* If interface has been removed - break */
8869 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8872 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8873 /* Ensure "is_leader" and "recovery_state"
8874 * update values are seen on other CPUs
8879 case BNX2X_RECOVERY_WAIT:
8880 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8881 if (bp->is_leader) {
8882 u32 load_counter = bnx2x_get_load_cnt(bp);
8884 /* Wait until all other functions get
8887 schedule_delayed_work(&bp->reset_task,
8891 /* If all other functions got down -
8892 * try to bring the chip back to
8893 * normal. In any case it's an exit
8894 * point for a leader.
8896 if (bnx2x_leader_reset(bp) ||
8897 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8898 printk(KERN_ERR"%s: Recovery "
8899 "has failed. Power cycle is "
8900 "needed.\n", bp->dev->name);
8901 /* Disconnect this device */
8902 netif_device_detach(bp->dev);
8903 /* Block ifup for all function
8904 * of this ASIC until
8905 * "process kill" or power
8908 bnx2x_set_reset_in_progress(bp);
8909 /* Shut down the power */
8910 bnx2x_set_power_state(bp,
8917 } else { /* non-leader */
8918 if (!bnx2x_reset_is_done(bp)) {
8919 /* Try to get a LEADER_LOCK HW lock as
8920 * long as a former leader may have
8921 * been unloaded by the user or
8922 * released a leadership by another
8925 if (bnx2x_trylock_hw_lock(bp,
8926 HW_LOCK_RESOURCE_RESERVED_08)) {
8927 /* I'm a leader now! Restart a
8934 schedule_delayed_work(&bp->reset_task,
8938 } else { /* A leader has completed
8939 * the "process kill". It's an exit
8940 * point for a non-leader.
8942 bnx2x_nic_load(bp, LOAD_NORMAL);
8943 bp->recovery_state =
8944 BNX2X_RECOVERY_DONE;
8955 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8956 * scheduled on a general queue in order to prevent a dead lock.
8958 static void bnx2x_reset_task(struct work_struct *work)
8960 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8962 #ifdef BNX2X_STOP_ON_ERROR
8963 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8964 " so reset not done to allow debug dump,\n"
8965 KERN_ERR " you will need to reboot when done\n");
8971 if (!netif_running(bp->dev))
8972 goto reset_task_exit;
8974 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8975 bnx2x_parity_recover(bp);
8977 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8978 bnx2x_nic_load(bp, LOAD_NORMAL);
8985 /* end of nic load/unload */
8990 * Init service functions
8993 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8996 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8997 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8998 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8999 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9000 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9001 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9002 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9003 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9005 BNX2X_ERR("Unsupported function index: %d\n", func);
9010 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9012 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9014 /* Flush all outstanding writes */
9017 /* Pretend to be function 0 */
9019 /* Flush the GRC transaction (in the chip) */
9020 new_val = REG_RD(bp, reg);
9022 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9027 /* From now we are in the "like-E1" mode */
9028 bnx2x_int_disable(bp);
9030 /* Flush all outstanding writes */
9033 /* Restore the original funtion settings */
9034 REG_WR(bp, reg, orig_func);
9035 new_val = REG_RD(bp, reg);
9036 if (new_val != orig_func) {
9037 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9038 orig_func, new_val);
9043 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9045 if (CHIP_IS_E1H(bp))
9046 bnx2x_undi_int_disable_e1h(bp, func);
9048 bnx2x_int_disable(bp);
9051 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9055 /* Check if there is any driver already loaded */
9056 val = REG_RD(bp, MISC_REG_UNPREPARED);
9058 /* Check if it is the UNDI driver
9059 * UNDI driver initializes CID offset for normal bell to 0x7
9061 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9062 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9064 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9066 int func = BP_FUNC(bp);
9070 /* clear the UNDI indication */
9071 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9073 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9075 /* try unload UNDI on port 0 */
9078 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9079 DRV_MSG_SEQ_NUMBER_MASK);
9080 reset_code = bnx2x_fw_command(bp, reset_code);
9082 /* if UNDI is loaded on the other port */
9083 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9085 /* send "DONE" for previous unload */
9086 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9088 /* unload UNDI on port 1 */
9091 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9092 DRV_MSG_SEQ_NUMBER_MASK);
9093 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9095 bnx2x_fw_command(bp, reset_code);
9098 /* now it's safe to release the lock */
9099 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9101 bnx2x_undi_int_disable(bp, func);
9103 /* close input traffic and wait for it */
9104 /* Do not rcv packets to BRB */
9106 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9107 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9108 /* Do not direct rcv packets that are not for MCP to
9111 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9112 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9115 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9116 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9119 /* save NIG port swap info */
9120 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9121 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9124 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9127 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9129 /* take the NIG out of reset and restore swap values */
9131 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9132 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9133 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9134 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9136 /* send unload done to the MCP */
9137 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9139 /* restore our func and fw_seq */
9142 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9143 DRV_MSG_SEQ_NUMBER_MASK);
9146 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9150 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9152 u32 val, val2, val3, val4, id;
9155 /* Get the chip revision id and number. */
9156 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9157 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9158 id = ((val & 0xffff) << 16);
9159 val = REG_RD(bp, MISC_REG_CHIP_REV);
9160 id |= ((val & 0xf) << 12);
9161 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9162 id |= ((val & 0xff) << 4);
9163 val = REG_RD(bp, MISC_REG_BOND_ID);
9165 bp->common.chip_id = id;
9166 bp->link_params.chip_id = bp->common.chip_id;
9167 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9169 val = (REG_RD(bp, 0x2874) & 0x55);
9170 if ((bp->common.chip_id & 0x1) ||
9171 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9172 bp->flags |= ONE_PORT_FLAG;
9173 BNX2X_DEV_INFO("single port device\n");
9176 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9177 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9178 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9179 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9180 bp->common.flash_size, bp->common.flash_size);
9182 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9183 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9184 bp->link_params.shmem_base = bp->common.shmem_base;
9185 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9186 bp->common.shmem_base, bp->common.shmem2_base);
9188 if (!bp->common.shmem_base ||
9189 (bp->common.shmem_base < 0xA0000) ||
9190 (bp->common.shmem_base >= 0xC0000)) {
9191 BNX2X_DEV_INFO("MCP not active\n");
9192 bp->flags |= NO_MCP_FLAG;
9196 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9197 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9198 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9199 BNX2X_ERROR("BAD MCP validity signature\n");
9201 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9202 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9204 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9205 SHARED_HW_CFG_LED_MODE_MASK) >>
9206 SHARED_HW_CFG_LED_MODE_SHIFT);
9208 bp->link_params.feature_config_flags = 0;
9209 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9210 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9211 bp->link_params.feature_config_flags |=
9212 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9214 bp->link_params.feature_config_flags &=
9215 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9217 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9218 bp->common.bc_ver = val;
9219 BNX2X_DEV_INFO("bc_ver %X\n", val);
9220 if (val < BNX2X_BC_VER) {
9221 /* for now only warn
9222 * later we might need to enforce this */
9223 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9224 "please upgrade BC\n", BNX2X_BC_VER, val);
9226 bp->link_params.feature_config_flags |=
9227 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9228 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9230 if (BP_E1HVN(bp) == 0) {
9231 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9232 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9234 /* no WOL capability for E1HVN != 0 */
9235 bp->flags |= NO_WOL_FLAG;
9237 BNX2X_DEV_INFO("%sWoL capable\n",
9238 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9240 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9241 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9242 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9243 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9245 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9246 val, val2, val3, val4);
9249 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9252 int port = BP_PORT(bp);
9255 switch (switch_cfg) {
9257 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9260 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9261 switch (ext_phy_type) {
9262 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9263 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9266 bp->port.supported |= (SUPPORTED_10baseT_Half |
9267 SUPPORTED_10baseT_Full |
9268 SUPPORTED_100baseT_Half |
9269 SUPPORTED_100baseT_Full |
9270 SUPPORTED_1000baseT_Full |
9271 SUPPORTED_2500baseX_Full |
9276 SUPPORTED_Asym_Pause);
9279 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9280 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9283 bp->port.supported |= (SUPPORTED_10baseT_Half |
9284 SUPPORTED_10baseT_Full |
9285 SUPPORTED_100baseT_Half |
9286 SUPPORTED_100baseT_Full |
9287 SUPPORTED_1000baseT_Full |
9292 SUPPORTED_Asym_Pause);
9296 BNX2X_ERR("NVRAM config error. "
9297 "BAD SerDes ext_phy_config 0x%x\n",
9298 bp->link_params.ext_phy_config);
9302 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9304 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9307 case SWITCH_CFG_10G:
9308 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9311 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9312 switch (ext_phy_type) {
9313 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9314 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9317 bp->port.supported |= (SUPPORTED_10baseT_Half |
9318 SUPPORTED_10baseT_Full |
9319 SUPPORTED_100baseT_Half |
9320 SUPPORTED_100baseT_Full |
9321 SUPPORTED_1000baseT_Full |
9322 SUPPORTED_2500baseX_Full |
9323 SUPPORTED_10000baseT_Full |
9328 SUPPORTED_Asym_Pause);
9331 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9332 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9335 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9336 SUPPORTED_1000baseT_Full |
9340 SUPPORTED_Asym_Pause);
9343 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9344 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9347 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9348 SUPPORTED_2500baseX_Full |
9349 SUPPORTED_1000baseT_Full |
9353 SUPPORTED_Asym_Pause);
9356 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9357 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9360 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9363 SUPPORTED_Asym_Pause);
9366 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9367 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9370 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9371 SUPPORTED_1000baseT_Full |
9374 SUPPORTED_Asym_Pause);
9377 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9378 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9381 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9382 SUPPORTED_1000baseT_Full |
9386 SUPPORTED_Asym_Pause);
9389 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9390 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9393 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9394 SUPPORTED_1000baseT_Full |
9398 SUPPORTED_Asym_Pause);
9401 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9402 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9405 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9409 SUPPORTED_Asym_Pause);
9412 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9413 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9416 bp->port.supported |= (SUPPORTED_10baseT_Half |
9417 SUPPORTED_10baseT_Full |
9418 SUPPORTED_100baseT_Half |
9419 SUPPORTED_100baseT_Full |
9420 SUPPORTED_1000baseT_Full |
9421 SUPPORTED_10000baseT_Full |
9425 SUPPORTED_Asym_Pause);
9428 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9429 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9430 bp->link_params.ext_phy_config);
9434 BNX2X_ERR("NVRAM config error. "
9435 "BAD XGXS ext_phy_config 0x%x\n",
9436 bp->link_params.ext_phy_config);
9440 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9442 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9447 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9448 bp->port.link_config);
9451 bp->link_params.phy_addr = bp->port.phy_addr;
9453 /* mask what we support according to speed_cap_mask */
9454 if (!(bp->link_params.speed_cap_mask &
9455 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9456 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9458 if (!(bp->link_params.speed_cap_mask &
9459 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9460 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9462 if (!(bp->link_params.speed_cap_mask &
9463 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9464 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9466 if (!(bp->link_params.speed_cap_mask &
9467 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9468 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9470 if (!(bp->link_params.speed_cap_mask &
9471 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9472 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9473 SUPPORTED_1000baseT_Full);
9475 if (!(bp->link_params.speed_cap_mask &
9476 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9477 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9479 if (!(bp->link_params.speed_cap_mask &
9480 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9481 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9483 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9486 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9488 bp->link_params.req_duplex = DUPLEX_FULL;
9490 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9491 case PORT_FEATURE_LINK_SPEED_AUTO:
9492 if (bp->port.supported & SUPPORTED_Autoneg) {
9493 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9494 bp->port.advertising = bp->port.supported;
9497 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9499 if ((ext_phy_type ==
9500 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9502 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9503 /* force 10G, no AN */
9504 bp->link_params.req_line_speed = SPEED_10000;
9505 bp->port.advertising =
9506 (ADVERTISED_10000baseT_Full |
9510 BNX2X_ERR("NVRAM config error. "
9511 "Invalid link_config 0x%x"
9512 " Autoneg not supported\n",
9513 bp->port.link_config);
9518 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9519 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9520 bp->link_params.req_line_speed = SPEED_10;
9521 bp->port.advertising = (ADVERTISED_10baseT_Full |
9524 BNX2X_ERROR("NVRAM config error. "
9525 "Invalid link_config 0x%x"
9526 " speed_cap_mask 0x%x\n",
9527 bp->port.link_config,
9528 bp->link_params.speed_cap_mask);
9533 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9534 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9535 bp->link_params.req_line_speed = SPEED_10;
9536 bp->link_params.req_duplex = DUPLEX_HALF;
9537 bp->port.advertising = (ADVERTISED_10baseT_Half |
9540 BNX2X_ERROR("NVRAM config error. "
9541 "Invalid link_config 0x%x"
9542 " speed_cap_mask 0x%x\n",
9543 bp->port.link_config,
9544 bp->link_params.speed_cap_mask);
9549 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9550 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9551 bp->link_params.req_line_speed = SPEED_100;
9552 bp->port.advertising = (ADVERTISED_100baseT_Full |
9555 BNX2X_ERROR("NVRAM config error. "
9556 "Invalid link_config 0x%x"
9557 " speed_cap_mask 0x%x\n",
9558 bp->port.link_config,
9559 bp->link_params.speed_cap_mask);
9564 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9565 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9566 bp->link_params.req_line_speed = SPEED_100;
9567 bp->link_params.req_duplex = DUPLEX_HALF;
9568 bp->port.advertising = (ADVERTISED_100baseT_Half |
9571 BNX2X_ERROR("NVRAM config error. "
9572 "Invalid link_config 0x%x"
9573 " speed_cap_mask 0x%x\n",
9574 bp->port.link_config,
9575 bp->link_params.speed_cap_mask);
9580 case PORT_FEATURE_LINK_SPEED_1G:
9581 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9582 bp->link_params.req_line_speed = SPEED_1000;
9583 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9586 BNX2X_ERROR("NVRAM config error. "
9587 "Invalid link_config 0x%x"
9588 " speed_cap_mask 0x%x\n",
9589 bp->port.link_config,
9590 bp->link_params.speed_cap_mask);
9595 case PORT_FEATURE_LINK_SPEED_2_5G:
9596 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9597 bp->link_params.req_line_speed = SPEED_2500;
9598 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9601 BNX2X_ERROR("NVRAM config error. "
9602 "Invalid link_config 0x%x"
9603 " speed_cap_mask 0x%x\n",
9604 bp->port.link_config,
9605 bp->link_params.speed_cap_mask);
9610 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9611 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9612 case PORT_FEATURE_LINK_SPEED_10G_KR:
9613 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9614 bp->link_params.req_line_speed = SPEED_10000;
9615 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9618 BNX2X_ERROR("NVRAM config error. "
9619 "Invalid link_config 0x%x"
9620 " speed_cap_mask 0x%x\n",
9621 bp->port.link_config,
9622 bp->link_params.speed_cap_mask);
9628 BNX2X_ERROR("NVRAM config error. "
9629 "BAD link speed link_config 0x%x\n",
9630 bp->port.link_config);
9631 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9632 bp->port.advertising = bp->port.supported;
9636 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9637 PORT_FEATURE_FLOW_CONTROL_MASK);
9638 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9639 !(bp->port.supported & SUPPORTED_Autoneg))
9640 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9642 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
9643 " advertising 0x%x\n",
9644 bp->link_params.req_line_speed,
9645 bp->link_params.req_duplex,
9646 bp->link_params.req_flow_ctrl, bp->port.advertising);
9649 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9651 mac_hi = cpu_to_be16(mac_hi);
9652 mac_lo = cpu_to_be32(mac_lo);
9653 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9654 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9657 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9659 int port = BP_PORT(bp);
9665 bp->link_params.bp = bp;
9666 bp->link_params.port = port;
9668 bp->link_params.lane_config =
9669 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9670 bp->link_params.ext_phy_config =
9672 dev_info.port_hw_config[port].external_phy_config);
9673 /* BCM8727_NOC => BCM8727 no over current */
9674 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9675 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9676 bp->link_params.ext_phy_config &=
9677 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9678 bp->link_params.ext_phy_config |=
9679 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9680 bp->link_params.feature_config_flags |=
9681 FEATURE_CONFIG_BCM8727_NOC;
9684 bp->link_params.speed_cap_mask =
9686 dev_info.port_hw_config[port].speed_capability_mask);
9688 bp->port.link_config =
9689 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9691 /* Get the 4 lanes xgxs config rx and tx */
9692 for (i = 0; i < 2; i++) {
9694 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9695 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9696 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9699 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9700 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9701 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9704 /* If the device is capable of WoL, set the default state according
9707 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9708 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9709 (config & PORT_FEATURE_WOL_ENABLED));
9711 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9712 " speed_cap_mask 0x%08x link_config 0x%08x\n",
9713 bp->link_params.lane_config,
9714 bp->link_params.ext_phy_config,
9715 bp->link_params.speed_cap_mask, bp->port.link_config);
9717 bp->link_params.switch_cfg |= (bp->port.link_config &
9718 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9719 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9721 bnx2x_link_settings_requested(bp);
9724 * If connected directly, work with the internal PHY, otherwise, work
9725 * with the external PHY
9727 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9728 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9729 bp->mdio.prtad = bp->link_params.phy_addr;
9731 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9732 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9734 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9736 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9737 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9738 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9739 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9740 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9743 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9744 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9745 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9749 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9751 int func = BP_FUNC(bp);
9755 bnx2x_get_common_hwinfo(bp);
9759 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9761 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9763 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9764 FUNC_MF_CFG_E1HOV_TAG_MASK);
9765 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9767 BNX2X_DEV_INFO("%s function mode\n",
9768 IS_E1HMF(bp) ? "multi" : "single");
9771 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9773 FUNC_MF_CFG_E1HOV_TAG_MASK);
9774 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9776 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9778 func, bp->e1hov, bp->e1hov);
9780 BNX2X_ERROR("No valid E1HOV for func %d,"
9781 " aborting\n", func);
9786 BNX2X_ERROR("VN %d in single function mode,"
9787 " aborting\n", BP_E1HVN(bp));
9793 if (!BP_NOMCP(bp)) {
9794 bnx2x_get_port_hwinfo(bp);
9796 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9797 DRV_MSG_SEQ_NUMBER_MASK);
9798 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9802 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9803 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9804 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9805 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9806 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9807 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9808 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9809 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9810 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9811 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9812 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9814 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9822 /* only supposed to happen on emulation/FPGA */
9823 BNX2X_ERROR("warning: random MAC workaround active\n");
9824 random_ether_addr(bp->dev->dev_addr);
9825 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9831 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9833 int cnt, i, block_end, rodi;
9834 char vpd_data[BNX2X_VPD_LEN+1];
9835 char str_id_reg[VENDOR_ID_LEN+1];
9836 char str_id_cap[VENDOR_ID_LEN+1];
9839 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9840 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9842 if (cnt < BNX2X_VPD_LEN)
9845 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9846 PCI_VPD_LRDT_RO_DATA);
9851 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9852 pci_vpd_lrdt_size(&vpd_data[i]);
9854 i += PCI_VPD_LRDT_TAG_SIZE;
9856 if (block_end > BNX2X_VPD_LEN)
9859 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9860 PCI_VPD_RO_KEYWORD_MFR_ID);
9864 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9866 if (len != VENDOR_ID_LEN)
9869 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9871 /* vendor specific info */
9872 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9873 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9874 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9875 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9877 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9878 PCI_VPD_RO_KEYWORD_VENDOR0);
9880 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9882 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9884 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9885 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9886 bp->fw_ver[len] = ' ';
9895 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9897 int func = BP_FUNC(bp);
9901 /* Disable interrupt handling until HW is initialized */
9902 atomic_set(&bp->intr_sem, 1);
9903 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9905 mutex_init(&bp->port.phy_mutex);
9906 mutex_init(&bp->fw_mb_mutex);
9908 mutex_init(&bp->cnic_mutex);
9911 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9912 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9914 rc = bnx2x_get_hwinfo(bp);
9916 bnx2x_read_fwinfo(bp);
9917 /* need to reset chip if undi was active */
9919 bnx2x_undi_unload(bp);
9921 if (CHIP_REV_IS_FPGA(bp))
9922 dev_err(&bp->pdev->dev, "FPGA detected\n");
9924 if (BP_NOMCP(bp) && (func == 0))
9925 dev_err(&bp->pdev->dev, "MCP disabled, "
9926 "must load devices in order!\n");
9928 /* Set multi queue mode */
9929 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9930 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9931 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9932 "requested is not MSI-X\n");
9933 multi_mode = ETH_RSS_MODE_DISABLED;
9935 bp->multi_mode = multi_mode;
9938 bp->dev->features |= NETIF_F_GRO;
9942 bp->flags &= ~TPA_ENABLE_FLAG;
9943 bp->dev->features &= ~NETIF_F_LRO;
9945 bp->flags |= TPA_ENABLE_FLAG;
9946 bp->dev->features |= NETIF_F_LRO;
9950 bp->dropless_fc = 0;
9952 bp->dropless_fc = dropless_fc;
9956 bp->tx_ring_size = MAX_TX_AVAIL;
9957 bp->rx_ring_size = MAX_RX_AVAIL;
9961 /* make sure that the numbers are in the right granularity */
9962 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9963 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9965 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9966 bp->current_interval = (poll ? poll : timer_interval);
9968 init_timer(&bp->timer);
9969 bp->timer.expires = jiffies + bp->current_interval;
9970 bp->timer.data = (unsigned long) bp;
9971 bp->timer.function = bnx2x_timer;
9977 * ethtool service functions
9980 /* All ethtool functions called with rtnl_lock */
9982 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9984 struct bnx2x *bp = netdev_priv(dev);
9986 cmd->supported = bp->port.supported;
9987 cmd->advertising = bp->port.advertising;
9989 if ((bp->state == BNX2X_STATE_OPEN) &&
9990 !(bp->flags & MF_FUNC_DIS) &&
9991 (bp->link_vars.link_up)) {
9992 cmd->speed = bp->link_vars.line_speed;
9993 cmd->duplex = bp->link_vars.duplex;
9998 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9999 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10000 if (vn_max_rate < cmd->speed)
10001 cmd->speed = vn_max_rate;
10008 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10010 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10012 switch (ext_phy_type) {
10013 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10014 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10020 cmd->port = PORT_FIBRE;
10023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10025 cmd->port = PORT_TP;
10028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10029 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10030 bp->link_params.ext_phy_config);
10034 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10035 bp->link_params.ext_phy_config);
10039 cmd->port = PORT_TP;
10041 cmd->phy_address = bp->mdio.prtad;
10042 cmd->transceiver = XCVR_INTERNAL;
10044 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10045 cmd->autoneg = AUTONEG_ENABLE;
10047 cmd->autoneg = AUTONEG_DISABLE;
10052 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10053 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10054 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10055 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10056 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10057 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10058 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10063 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10065 struct bnx2x *bp = netdev_priv(dev);
10071 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10072 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10073 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10074 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10075 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10076 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10077 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10079 if (cmd->autoneg == AUTONEG_ENABLE) {
10080 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10081 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10085 /* advertise the requested speed and duplex if supported */
10086 cmd->advertising &= bp->port.supported;
10088 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10089 bp->link_params.req_duplex = DUPLEX_FULL;
10090 bp->port.advertising |= (ADVERTISED_Autoneg |
10093 } else { /* forced speed */
10094 /* advertise the requested speed and duplex if supported */
10095 switch (cmd->speed) {
10097 if (cmd->duplex == DUPLEX_FULL) {
10098 if (!(bp->port.supported &
10099 SUPPORTED_10baseT_Full)) {
10101 "10M full not supported\n");
10105 advertising = (ADVERTISED_10baseT_Full |
10108 if (!(bp->port.supported &
10109 SUPPORTED_10baseT_Half)) {
10111 "10M half not supported\n");
10115 advertising = (ADVERTISED_10baseT_Half |
10121 if (cmd->duplex == DUPLEX_FULL) {
10122 if (!(bp->port.supported &
10123 SUPPORTED_100baseT_Full)) {
10125 "100M full not supported\n");
10129 advertising = (ADVERTISED_100baseT_Full |
10132 if (!(bp->port.supported &
10133 SUPPORTED_100baseT_Half)) {
10135 "100M half not supported\n");
10139 advertising = (ADVERTISED_100baseT_Half |
10145 if (cmd->duplex != DUPLEX_FULL) {
10146 DP(NETIF_MSG_LINK, "1G half not supported\n");
10150 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10151 DP(NETIF_MSG_LINK, "1G full not supported\n");
10155 advertising = (ADVERTISED_1000baseT_Full |
10160 if (cmd->duplex != DUPLEX_FULL) {
10162 "2.5G half not supported\n");
10166 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10168 "2.5G full not supported\n");
10172 advertising = (ADVERTISED_2500baseX_Full |
10177 if (cmd->duplex != DUPLEX_FULL) {
10178 DP(NETIF_MSG_LINK, "10G half not supported\n");
10182 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10183 DP(NETIF_MSG_LINK, "10G full not supported\n");
10187 advertising = (ADVERTISED_10000baseT_Full |
10192 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10196 bp->link_params.req_line_speed = cmd->speed;
10197 bp->link_params.req_duplex = cmd->duplex;
10198 bp->port.advertising = advertising;
10201 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10202 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10203 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10204 bp->port.advertising);
10206 if (netif_running(dev)) {
10207 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10208 bnx2x_link_set(bp);
10214 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10215 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10217 static int bnx2x_get_regs_len(struct net_device *dev)
10219 struct bnx2x *bp = netdev_priv(dev);
10220 int regdump_len = 0;
10223 if (CHIP_IS_E1(bp)) {
10224 for (i = 0; i < REGS_COUNT; i++)
10225 if (IS_E1_ONLINE(reg_addrs[i].info))
10226 regdump_len += reg_addrs[i].size;
10228 for (i = 0; i < WREGS_COUNT_E1; i++)
10229 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10230 regdump_len += wreg_addrs_e1[i].size *
10231 (1 + wreg_addrs_e1[i].read_regs_count);
10234 for (i = 0; i < REGS_COUNT; i++)
10235 if (IS_E1H_ONLINE(reg_addrs[i].info))
10236 regdump_len += reg_addrs[i].size;
10238 for (i = 0; i < WREGS_COUNT_E1H; i++)
10239 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10240 regdump_len += wreg_addrs_e1h[i].size *
10241 (1 + wreg_addrs_e1h[i].read_regs_count);
10244 regdump_len += sizeof(struct dump_hdr);
10246 return regdump_len;
10249 static void bnx2x_get_regs(struct net_device *dev,
10250 struct ethtool_regs *regs, void *_p)
10253 struct bnx2x *bp = netdev_priv(dev);
10254 struct dump_hdr dump_hdr = {0};
10257 memset(p, 0, regs->len);
10259 if (!netif_running(bp->dev))
10262 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10263 dump_hdr.dump_sign = dump_sign_all;
10264 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10265 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10266 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10267 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10268 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10270 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10271 p += dump_hdr.hdr_size + 1;
10273 if (CHIP_IS_E1(bp)) {
10274 for (i = 0; i < REGS_COUNT; i++)
10275 if (IS_E1_ONLINE(reg_addrs[i].info))
10276 for (j = 0; j < reg_addrs[i].size; j++)
10278 reg_addrs[i].addr + j*4);
10281 for (i = 0; i < REGS_COUNT; i++)
10282 if (IS_E1H_ONLINE(reg_addrs[i].info))
10283 for (j = 0; j < reg_addrs[i].size; j++)
10285 reg_addrs[i].addr + j*4);
10289 #define PHY_FW_VER_LEN 10
10291 static void bnx2x_get_drvinfo(struct net_device *dev,
10292 struct ethtool_drvinfo *info)
10294 struct bnx2x *bp = netdev_priv(dev);
10295 u8 phy_fw_ver[PHY_FW_VER_LEN];
10297 strcpy(info->driver, DRV_MODULE_NAME);
10298 strcpy(info->version, DRV_MODULE_VERSION);
10300 phy_fw_ver[0] = '\0';
10301 if (bp->port.pmf) {
10302 bnx2x_acquire_phy_lock(bp);
10303 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10304 (bp->state != BNX2X_STATE_CLOSED),
10305 phy_fw_ver, PHY_FW_VER_LEN);
10306 bnx2x_release_phy_lock(bp);
10309 strncpy(info->fw_version, bp->fw_ver, 32);
10310 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10312 (bp->common.bc_ver & 0xff0000) >> 16,
10313 (bp->common.bc_ver & 0xff00) >> 8,
10314 (bp->common.bc_ver & 0xff),
10315 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10316 strcpy(info->bus_info, pci_name(bp->pdev));
10317 info->n_stats = BNX2X_NUM_STATS;
10318 info->testinfo_len = BNX2X_NUM_TESTS;
10319 info->eedump_len = bp->common.flash_size;
10320 info->regdump_len = bnx2x_get_regs_len(dev);
10323 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10325 struct bnx2x *bp = netdev_priv(dev);
10327 if (bp->flags & NO_WOL_FLAG) {
10328 wol->supported = 0;
10331 wol->supported = WAKE_MAGIC;
10333 wol->wolopts = WAKE_MAGIC;
10337 memset(&wol->sopass, 0, sizeof(wol->sopass));
10340 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10342 struct bnx2x *bp = netdev_priv(dev);
10344 if (wol->wolopts & ~WAKE_MAGIC)
10347 if (wol->wolopts & WAKE_MAGIC) {
10348 if (bp->flags & NO_WOL_FLAG)
10358 static u32 bnx2x_get_msglevel(struct net_device *dev)
10360 struct bnx2x *bp = netdev_priv(dev);
10362 return bp->msg_enable;
10365 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10367 struct bnx2x *bp = netdev_priv(dev);
10369 if (capable(CAP_NET_ADMIN))
10370 bp->msg_enable = level;
10373 static int bnx2x_nway_reset(struct net_device *dev)
10375 struct bnx2x *bp = netdev_priv(dev);
10380 if (netif_running(dev)) {
10381 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10382 bnx2x_link_set(bp);
10388 static u32 bnx2x_get_link(struct net_device *dev)
10390 struct bnx2x *bp = netdev_priv(dev);
10392 if (bp->flags & MF_FUNC_DIS)
10395 return bp->link_vars.link_up;
10398 static int bnx2x_get_eeprom_len(struct net_device *dev)
10400 struct bnx2x *bp = netdev_priv(dev);
10402 return bp->common.flash_size;
10405 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10407 int port = BP_PORT(bp);
10411 /* adjust timeout for emulation/FPGA */
10412 count = NVRAM_TIMEOUT_COUNT;
10413 if (CHIP_REV_IS_SLOW(bp))
10416 /* request access to nvram interface */
10417 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10418 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10420 for (i = 0; i < count*10; i++) {
10421 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10422 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10428 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10429 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10436 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10438 int port = BP_PORT(bp);
10442 /* adjust timeout for emulation/FPGA */
10443 count = NVRAM_TIMEOUT_COUNT;
10444 if (CHIP_REV_IS_SLOW(bp))
10447 /* relinquish nvram interface */
10448 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10449 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10451 for (i = 0; i < count*10; i++) {
10452 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10453 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10459 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10460 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10467 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10471 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10473 /* enable both bits, even on read */
10474 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10475 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10476 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10479 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10483 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10485 /* disable both bits, even after read */
10486 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10487 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10488 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10491 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10497 /* build the command word */
10498 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10500 /* need to clear DONE bit separately */
10501 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10503 /* address of the NVRAM to read from */
10504 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10505 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10507 /* issue a read command */
10508 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10510 /* adjust timeout for emulation/FPGA */
10511 count = NVRAM_TIMEOUT_COUNT;
10512 if (CHIP_REV_IS_SLOW(bp))
10515 /* wait for completion */
10518 for (i = 0; i < count; i++) {
10520 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10522 if (val & MCPR_NVM_COMMAND_DONE) {
10523 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10524 /* we read nvram data in cpu order
10525 * but ethtool sees it as an array of bytes
10526 * converting to big-endian will do the work */
10527 *ret_val = cpu_to_be32(val);
10536 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10543 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10545 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10550 if (offset + buf_size > bp->common.flash_size) {
10551 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10552 " buf_size (0x%x) > flash_size (0x%x)\n",
10553 offset, buf_size, bp->common.flash_size);
10557 /* request access to nvram interface */
10558 rc = bnx2x_acquire_nvram_lock(bp);
10562 /* enable access to nvram interface */
10563 bnx2x_enable_nvram_access(bp);
10565 /* read the first word(s) */
10566 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10567 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10568 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10569 memcpy(ret_buf, &val, 4);
10571 /* advance to the next dword */
10572 offset += sizeof(u32);
10573 ret_buf += sizeof(u32);
10574 buf_size -= sizeof(u32);
10579 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10580 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10581 memcpy(ret_buf, &val, 4);
10584 /* disable access to nvram interface */
10585 bnx2x_disable_nvram_access(bp);
10586 bnx2x_release_nvram_lock(bp);
10591 static int bnx2x_get_eeprom(struct net_device *dev,
10592 struct ethtool_eeprom *eeprom, u8 *eebuf)
10594 struct bnx2x *bp = netdev_priv(dev);
10597 if (!netif_running(dev))
10600 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10601 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10602 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10603 eeprom->len, eeprom->len);
10605 /* parameters already validated in ethtool_get_eeprom */
10607 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10612 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10617 /* build the command word */
10618 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10620 /* need to clear DONE bit separately */
10621 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10623 /* write the data */
10624 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10626 /* address of the NVRAM to write to */
10627 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10628 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10630 /* issue the write command */
10631 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10633 /* adjust timeout for emulation/FPGA */
10634 count = NVRAM_TIMEOUT_COUNT;
10635 if (CHIP_REV_IS_SLOW(bp))
10638 /* wait for completion */
10640 for (i = 0; i < count; i++) {
10642 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10643 if (val & MCPR_NVM_COMMAND_DONE) {
10652 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
10654 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10662 if (offset + buf_size > bp->common.flash_size) {
10663 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10664 " buf_size (0x%x) > flash_size (0x%x)\n",
10665 offset, buf_size, bp->common.flash_size);
10669 /* request access to nvram interface */
10670 rc = bnx2x_acquire_nvram_lock(bp);
10674 /* enable access to nvram interface */
10675 bnx2x_enable_nvram_access(bp);
10677 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10678 align_offset = (offset & ~0x03);
10679 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10682 val &= ~(0xff << BYTE_OFFSET(offset));
10683 val |= (*data_buf << BYTE_OFFSET(offset));
10685 /* nvram data is returned as an array of bytes
10686 * convert it back to cpu order */
10687 val = be32_to_cpu(val);
10689 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10693 /* disable access to nvram interface */
10694 bnx2x_disable_nvram_access(bp);
10695 bnx2x_release_nvram_lock(bp);
10700 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10706 u32 written_so_far;
10708 if (buf_size == 1) /* ethtool */
10709 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10711 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10713 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10718 if (offset + buf_size > bp->common.flash_size) {
10719 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10720 " buf_size (0x%x) > flash_size (0x%x)\n",
10721 offset, buf_size, bp->common.flash_size);
10725 /* request access to nvram interface */
10726 rc = bnx2x_acquire_nvram_lock(bp);
10730 /* enable access to nvram interface */
10731 bnx2x_enable_nvram_access(bp);
10733 written_so_far = 0;
10734 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10735 while ((written_so_far < buf_size) && (rc == 0)) {
10736 if (written_so_far == (buf_size - sizeof(u32)))
10737 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10738 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10739 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10740 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10741 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10743 memcpy(&val, data_buf, 4);
10745 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10747 /* advance to the next dword */
10748 offset += sizeof(u32);
10749 data_buf += sizeof(u32);
10750 written_so_far += sizeof(u32);
10754 /* disable access to nvram interface */
10755 bnx2x_disable_nvram_access(bp);
10756 bnx2x_release_nvram_lock(bp);
10761 static int bnx2x_set_eeprom(struct net_device *dev,
10762 struct ethtool_eeprom *eeprom, u8 *eebuf)
10764 struct bnx2x *bp = netdev_priv(dev);
10765 int port = BP_PORT(bp);
10768 if (!netif_running(dev))
10771 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10772 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10773 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10774 eeprom->len, eeprom->len);
10776 /* parameters already validated in ethtool_set_eeprom */
10778 /* PHY eeprom can be accessed only by the PMF */
10779 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10783 if (eeprom->magic == 0x50485950) {
10784 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10785 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10787 bnx2x_acquire_phy_lock(bp);
10788 rc |= bnx2x_link_reset(&bp->link_params,
10789 &bp->link_vars, 0);
10790 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10791 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10792 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10793 MISC_REGISTERS_GPIO_HIGH, port);
10794 bnx2x_release_phy_lock(bp);
10795 bnx2x_link_report(bp);
10797 } else if (eeprom->magic == 0x50485952) {
10798 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10799 if (bp->state == BNX2X_STATE_OPEN) {
10800 bnx2x_acquire_phy_lock(bp);
10801 rc |= bnx2x_link_reset(&bp->link_params,
10802 &bp->link_vars, 1);
10804 rc |= bnx2x_phy_init(&bp->link_params,
10806 bnx2x_release_phy_lock(bp);
10807 bnx2x_calc_fc_adv(bp);
10809 } else if (eeprom->magic == 0x53985943) {
10810 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10811 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10812 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10814 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10816 /* DSP Remove Download Mode */
10817 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10818 MISC_REGISTERS_GPIO_LOW, port);
10820 bnx2x_acquire_phy_lock(bp);
10822 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10824 /* wait 0.5 sec to allow it to run */
10826 bnx2x_ext_phy_hw_reset(bp, port);
10828 bnx2x_release_phy_lock(bp);
10831 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10836 static int bnx2x_get_coalesce(struct net_device *dev,
10837 struct ethtool_coalesce *coal)
10839 struct bnx2x *bp = netdev_priv(dev);
10841 memset(coal, 0, sizeof(struct ethtool_coalesce));
10843 coal->rx_coalesce_usecs = bp->rx_ticks;
10844 coal->tx_coalesce_usecs = bp->tx_ticks;
10849 static int bnx2x_set_coalesce(struct net_device *dev,
10850 struct ethtool_coalesce *coal)
10852 struct bnx2x *bp = netdev_priv(dev);
10854 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10855 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10856 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10858 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10859 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10860 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10862 if (netif_running(dev))
10863 bnx2x_update_coalesce(bp);
10868 static void bnx2x_get_ringparam(struct net_device *dev,
10869 struct ethtool_ringparam *ering)
10871 struct bnx2x *bp = netdev_priv(dev);
10873 ering->rx_max_pending = MAX_RX_AVAIL;
10874 ering->rx_mini_max_pending = 0;
10875 ering->rx_jumbo_max_pending = 0;
10877 ering->rx_pending = bp->rx_ring_size;
10878 ering->rx_mini_pending = 0;
10879 ering->rx_jumbo_pending = 0;
10881 ering->tx_max_pending = MAX_TX_AVAIL;
10882 ering->tx_pending = bp->tx_ring_size;
10885 static int bnx2x_set_ringparam(struct net_device *dev,
10886 struct ethtool_ringparam *ering)
10888 struct bnx2x *bp = netdev_priv(dev);
10891 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10892 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10896 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10897 (ering->tx_pending > MAX_TX_AVAIL) ||
10898 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10901 bp->rx_ring_size = ering->rx_pending;
10902 bp->tx_ring_size = ering->tx_pending;
10904 if (netif_running(dev)) {
10905 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10906 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10912 static void bnx2x_get_pauseparam(struct net_device *dev,
10913 struct ethtool_pauseparam *epause)
10915 struct bnx2x *bp = netdev_priv(dev);
10917 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10918 BNX2X_FLOW_CTRL_AUTO) &&
10919 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10921 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10922 BNX2X_FLOW_CTRL_RX);
10923 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10924 BNX2X_FLOW_CTRL_TX);
10926 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10927 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10928 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10931 static int bnx2x_set_pauseparam(struct net_device *dev,
10932 struct ethtool_pauseparam *epause)
10934 struct bnx2x *bp = netdev_priv(dev);
10939 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10940 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10941 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10945 if (epause->rx_pause)
10946 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10948 if (epause->tx_pause)
10949 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10951 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10952 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10954 if (epause->autoneg) {
10955 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10956 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10960 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10961 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10965 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10967 if (netif_running(dev)) {
10968 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10969 bnx2x_link_set(bp);
10975 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10977 struct bnx2x *bp = netdev_priv(dev);
10981 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10982 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10986 /* TPA requires Rx CSUM offloading */
10987 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10988 if (!disable_tpa) {
10989 if (!(dev->features & NETIF_F_LRO)) {
10990 dev->features |= NETIF_F_LRO;
10991 bp->flags |= TPA_ENABLE_FLAG;
10996 } else if (dev->features & NETIF_F_LRO) {
10997 dev->features &= ~NETIF_F_LRO;
10998 bp->flags &= ~TPA_ENABLE_FLAG;
11002 if (changed && netif_running(dev)) {
11003 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11004 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11010 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11012 struct bnx2x *bp = netdev_priv(dev);
11014 return bp->rx_csum;
11017 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11019 struct bnx2x *bp = netdev_priv(dev);
11022 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11023 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11027 bp->rx_csum = data;
11029 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
11030 TPA'ed packets will be discarded due to wrong TCP CSUM */
11032 u32 flags = ethtool_op_get_flags(dev);
11034 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11040 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11043 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11044 dev->features |= NETIF_F_TSO6;
11046 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11047 dev->features &= ~NETIF_F_TSO6;
11053 static const struct {
11054 char string[ETH_GSTRING_LEN];
11055 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11056 { "register_test (offline)" },
11057 { "memory_test (offline)" },
11058 { "loopback_test (offline)" },
11059 { "nvram_test (online)" },
11060 { "interrupt_test (online)" },
11061 { "link_test (online)" },
11062 { "idle check (online)" }
11065 static int bnx2x_test_registers(struct bnx2x *bp)
11067 int idx, i, rc = -ENODEV;
11069 int port = BP_PORT(bp);
11070 static const struct {
11075 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11076 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11077 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11078 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11079 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11080 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11081 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11082 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11083 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11084 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11085 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11086 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11087 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11088 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11089 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11090 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11091 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11092 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
11093 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
11094 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11095 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
11096 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11097 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11098 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11099 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11100 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11101 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11102 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11103 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
11104 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11105 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
11106 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11107 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11108 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11109 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11110 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11111 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11113 { 0xffffffff, 0, 0x00000000 }
11116 if (!netif_running(bp->dev))
11119 /* Repeat the test twice:
11120 First by writing 0x00000000, second by writing 0xffffffff */
11121 for (idx = 0; idx < 2; idx++) {
11128 wr_val = 0xffffffff;
11132 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11133 u32 offset, mask, save_val, val;
11135 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11136 mask = reg_tbl[i].mask;
11138 save_val = REG_RD(bp, offset);
11140 REG_WR(bp, offset, (wr_val & mask));
11141 val = REG_RD(bp, offset);
11143 /* Restore the original register's value */
11144 REG_WR(bp, offset, save_val);
11146 /* verify value is as expected */
11147 if ((val & mask) != (wr_val & mask)) {
11148 DP(NETIF_MSG_PROBE,
11149 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11150 offset, val, wr_val, mask);
11151 goto test_reg_exit;
11162 static int bnx2x_test_memory(struct bnx2x *bp)
11164 int i, j, rc = -ENODEV;
11166 static const struct {
11170 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11171 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11172 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11173 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11174 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11175 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11176 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11180 static const struct {
11186 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11187 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11188 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11189 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11190 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11191 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11193 { NULL, 0xffffffff, 0, 0 }
11196 if (!netif_running(bp->dev))
11199 /* Go through all the memories */
11200 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11201 for (j = 0; j < mem_tbl[i].size; j++)
11202 REG_RD(bp, mem_tbl[i].offset + j*4);
11204 /* Check the parity status */
11205 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11206 val = REG_RD(bp, prty_tbl[i].offset);
11207 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11208 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11210 "%s is 0x%x\n", prty_tbl[i].name, val);
11211 goto test_mem_exit;
11221 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11226 while (bnx2x_link_test(bp) && cnt--)
11230 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11232 unsigned int pkt_size, num_pkts, i;
11233 struct sk_buff *skb;
11234 unsigned char *packet;
11235 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11236 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11237 u16 tx_start_idx, tx_idx;
11238 u16 rx_start_idx, rx_idx;
11239 u16 pkt_prod, bd_prod;
11240 struct sw_tx_bd *tx_buf;
11241 struct eth_tx_start_bd *tx_start_bd;
11242 struct eth_tx_parse_bd *pbd = NULL;
11243 dma_addr_t mapping;
11244 union eth_rx_cqe *cqe;
11246 struct sw_rx_bd *rx_buf;
11250 /* check the loopback mode */
11251 switch (loopback_mode) {
11252 case BNX2X_PHY_LOOPBACK:
11253 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11256 case BNX2X_MAC_LOOPBACK:
11257 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11258 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11264 /* prepare the loopback packet */
11265 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11266 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11267 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11270 goto test_loopback_exit;
11272 packet = skb_put(skb, pkt_size);
11273 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11274 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11275 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11276 for (i = ETH_HLEN; i < pkt_size; i++)
11277 packet[i] = (unsigned char) (i & 0xff);
11279 /* send the loopback packet */
11281 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11282 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11284 pkt_prod = fp_tx->tx_pkt_prod++;
11285 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11286 tx_buf->first_bd = fp_tx->tx_bd_prod;
11290 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11291 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11292 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11293 skb_headlen(skb), DMA_TO_DEVICE);
11294 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11295 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11296 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11297 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11298 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11299 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11300 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11301 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11303 /* turn on parsing and get a BD */
11304 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11305 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11307 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11311 fp_tx->tx_db.data.prod += 2;
11313 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11318 fp_tx->tx_bd_prod += 2; /* start + pbd */
11322 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11323 if (tx_idx != tx_start_idx + num_pkts)
11324 goto test_loopback_exit;
11326 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11327 if (rx_idx != rx_start_idx + num_pkts)
11328 goto test_loopback_exit;
11330 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11331 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11332 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11333 goto test_loopback_rx_exit;
11335 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11336 if (len != pkt_size)
11337 goto test_loopback_rx_exit;
11339 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11341 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11342 for (i = ETH_HLEN; i < pkt_size; i++)
11343 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11344 goto test_loopback_rx_exit;
11348 test_loopback_rx_exit:
11350 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11351 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11352 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11353 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11355 /* Update producers */
11356 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11357 fp_rx->rx_sge_prod);
11359 test_loopback_exit:
11360 bp->link_params.loopback_mode = LOOPBACK_NONE;
11365 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11372 if (!netif_running(bp->dev))
11373 return BNX2X_LOOPBACK_FAILED;
11375 bnx2x_netif_stop(bp, 1);
11376 bnx2x_acquire_phy_lock(bp);
11378 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11380 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11381 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11384 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11386 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11387 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11390 bnx2x_release_phy_lock(bp);
11391 bnx2x_netif_start(bp);
11396 #define CRC32_RESIDUAL 0xdebb20e3
11398 static int bnx2x_test_nvram(struct bnx2x *bp)
11400 static const struct {
11404 { 0, 0x14 }, /* bootstrap */
11405 { 0x14, 0xec }, /* dir */
11406 { 0x100, 0x350 }, /* manuf_info */
11407 { 0x450, 0xf0 }, /* feature_info */
11408 { 0x640, 0x64 }, /* upgrade_key_info */
11410 { 0x708, 0x70 }, /* manuf_key_info */
11414 __be32 buf[0x350 / 4];
11415 u8 *data = (u8 *)buf;
11422 rc = bnx2x_nvram_read(bp, 0, data, 4);
11424 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11425 goto test_nvram_exit;
11428 magic = be32_to_cpu(buf[0]);
11429 if (magic != 0x669955aa) {
11430 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11432 goto test_nvram_exit;
11435 for (i = 0; nvram_tbl[i].size; i++) {
11437 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11438 nvram_tbl[i].size);
11440 DP(NETIF_MSG_PROBE,
11441 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11442 goto test_nvram_exit;
11445 crc = ether_crc_le(nvram_tbl[i].size, data);
11446 if (crc != CRC32_RESIDUAL) {
11447 DP(NETIF_MSG_PROBE,
11448 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11450 goto test_nvram_exit;
11458 static int bnx2x_test_intr(struct bnx2x *bp)
11460 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11463 if (!netif_running(bp->dev))
11466 config->hdr.length = 0;
11467 if (CHIP_IS_E1(bp))
11468 /* use last unicast entries */
11469 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11471 config->hdr.offset = BP_FUNC(bp);
11472 config->hdr.client_id = bp->fp->cl_id;
11473 config->hdr.reserved1 = 0;
11475 bp->set_mac_pending++;
11477 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11478 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11479 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11481 for (i = 0; i < 10; i++) {
11482 if (!bp->set_mac_pending)
11485 msleep_interruptible(10);
11494 static void bnx2x_self_test(struct net_device *dev,
11495 struct ethtool_test *etest, u64 *buf)
11497 struct bnx2x *bp = netdev_priv(dev);
11499 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11500 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11501 etest->flags |= ETH_TEST_FL_FAILED;
11505 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11507 if (!netif_running(dev))
11510 /* offline tests are not supported in MF mode */
11512 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11514 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11515 int port = BP_PORT(bp);
11519 /* save current value of input enable for TX port IF */
11520 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11521 /* disable input for TX port IF */
11522 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11524 link_up = (bnx2x_link_test(bp) == 0);
11525 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11526 bnx2x_nic_load(bp, LOAD_DIAG);
11527 /* wait until link state is restored */
11528 bnx2x_wait_for_link(bp, link_up);
11530 if (bnx2x_test_registers(bp) != 0) {
11532 etest->flags |= ETH_TEST_FL_FAILED;
11534 if (bnx2x_test_memory(bp) != 0) {
11536 etest->flags |= ETH_TEST_FL_FAILED;
11538 buf[2] = bnx2x_test_loopback(bp, link_up);
11540 etest->flags |= ETH_TEST_FL_FAILED;
11542 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11544 /* restore input for TX port IF */
11545 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11547 bnx2x_nic_load(bp, LOAD_NORMAL);
11548 /* wait until link state is restored */
11549 bnx2x_wait_for_link(bp, link_up);
11551 if (bnx2x_test_nvram(bp) != 0) {
11553 etest->flags |= ETH_TEST_FL_FAILED;
11555 if (bnx2x_test_intr(bp) != 0) {
11557 etest->flags |= ETH_TEST_FL_FAILED;
11560 if (bnx2x_link_test(bp) != 0) {
11562 etest->flags |= ETH_TEST_FL_FAILED;
11565 #ifdef BNX2X_EXTRA_DEBUG
11566 bnx2x_panic_dump(bp);
11570 static const struct {
11573 u8 string[ETH_GSTRING_LEN];
11574 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11575 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11576 { Q_STATS_OFFSET32(error_bytes_received_hi),
11577 8, "[%d]: rx_error_bytes" },
11578 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11579 8, "[%d]: rx_ucast_packets" },
11580 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11581 8, "[%d]: rx_mcast_packets" },
11582 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11583 8, "[%d]: rx_bcast_packets" },
11584 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11585 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11586 4, "[%d]: rx_phy_ip_err_discards"},
11587 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11588 4, "[%d]: rx_skb_alloc_discard" },
11589 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11591 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11592 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11593 8, "[%d]: tx_ucast_packets" },
11594 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11595 8, "[%d]: tx_mcast_packets" },
11596 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11597 8, "[%d]: tx_bcast_packets" }
11600 static const struct {
11604 #define STATS_FLAGS_PORT 1
11605 #define STATS_FLAGS_FUNC 2
11606 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11607 u8 string[ETH_GSTRING_LEN];
11608 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11609 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11610 8, STATS_FLAGS_BOTH, "rx_bytes" },
11611 { STATS_OFFSET32(error_bytes_received_hi),
11612 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11613 { STATS_OFFSET32(total_unicast_packets_received_hi),
11614 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11615 { STATS_OFFSET32(total_multicast_packets_received_hi),
11616 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11617 { STATS_OFFSET32(total_broadcast_packets_received_hi),
11618 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11619 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11620 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11621 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11622 8, STATS_FLAGS_PORT, "rx_align_errors" },
11623 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11624 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11625 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11626 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11627 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11628 8, STATS_FLAGS_PORT, "rx_fragments" },
11629 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11630 8, STATS_FLAGS_PORT, "rx_jabbers" },
11631 { STATS_OFFSET32(no_buff_discard_hi),
11632 8, STATS_FLAGS_BOTH, "rx_discards" },
11633 { STATS_OFFSET32(mac_filter_discard),
11634 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11635 { STATS_OFFSET32(xxoverflow_discard),
11636 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11637 { STATS_OFFSET32(brb_drop_hi),
11638 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11639 { STATS_OFFSET32(brb_truncate_hi),
11640 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11641 { STATS_OFFSET32(pause_frames_received_hi),
11642 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11643 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11644 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11645 { STATS_OFFSET32(nig_timer_max),
11646 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11647 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11648 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11649 { STATS_OFFSET32(rx_skb_alloc_failed),
11650 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11651 { STATS_OFFSET32(hw_csum_err),
11652 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11654 { STATS_OFFSET32(total_bytes_transmitted_hi),
11655 8, STATS_FLAGS_BOTH, "tx_bytes" },
11656 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11657 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11658 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11659 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11660 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11661 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11662 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11663 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11664 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11665 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11666 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11667 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11668 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11669 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11670 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11671 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11672 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11673 8, STATS_FLAGS_PORT, "tx_deferred" },
11674 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11675 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11676 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11677 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11678 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11679 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11680 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11681 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11682 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11683 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11684 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11685 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11686 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11687 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11688 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11689 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11690 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11691 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11692 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11693 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11694 { STATS_OFFSET32(pause_frames_sent_hi),
11695 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11698 #define IS_PORT_STAT(i) \
11699 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11700 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11701 #define IS_E1HMF_MODE_STAT(bp) \
11702 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11704 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11706 struct bnx2x *bp = netdev_priv(dev);
11709 switch (stringset) {
11711 if (is_multi(bp)) {
11712 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11713 if (!IS_E1HMF_MODE_STAT(bp))
11714 num_stats += BNX2X_NUM_STATS;
11716 if (IS_E1HMF_MODE_STAT(bp)) {
11718 for (i = 0; i < BNX2X_NUM_STATS; i++)
11719 if (IS_FUNC_STAT(i))
11722 num_stats = BNX2X_NUM_STATS;
11727 return BNX2X_NUM_TESTS;
11734 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11736 struct bnx2x *bp = netdev_priv(dev);
11739 switch (stringset) {
11741 if (is_multi(bp)) {
11743 for_each_queue(bp, i) {
11744 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11745 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11746 bnx2x_q_stats_arr[j].string, i);
11747 k += BNX2X_NUM_Q_STATS;
11749 if (IS_E1HMF_MODE_STAT(bp))
11751 for (j = 0; j < BNX2X_NUM_STATS; j++)
11752 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11753 bnx2x_stats_arr[j].string);
11755 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11756 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11758 strcpy(buf + j*ETH_GSTRING_LEN,
11759 bnx2x_stats_arr[i].string);
11766 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11771 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11772 struct ethtool_stats *stats, u64 *buf)
11774 struct bnx2x *bp = netdev_priv(dev);
11775 u32 *hw_stats, *offset;
11778 if (is_multi(bp)) {
11780 for_each_queue(bp, i) {
11781 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11782 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11783 if (bnx2x_q_stats_arr[j].size == 0) {
11784 /* skip this counter */
11788 offset = (hw_stats +
11789 bnx2x_q_stats_arr[j].offset);
11790 if (bnx2x_q_stats_arr[j].size == 4) {
11791 /* 4-byte counter */
11792 buf[k + j] = (u64) *offset;
11795 /* 8-byte counter */
11796 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11798 k += BNX2X_NUM_Q_STATS;
11800 if (IS_E1HMF_MODE_STAT(bp))
11802 hw_stats = (u32 *)&bp->eth_stats;
11803 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11804 if (bnx2x_stats_arr[j].size == 0) {
11805 /* skip this counter */
11809 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11810 if (bnx2x_stats_arr[j].size == 4) {
11811 /* 4-byte counter */
11812 buf[k + j] = (u64) *offset;
11815 /* 8-byte counter */
11816 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11819 hw_stats = (u32 *)&bp->eth_stats;
11820 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11821 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11823 if (bnx2x_stats_arr[i].size == 0) {
11824 /* skip this counter */
11829 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11830 if (bnx2x_stats_arr[i].size == 4) {
11831 /* 4-byte counter */
11832 buf[j] = (u64) *offset;
11836 /* 8-byte counter */
11837 buf[j] = HILO_U64(*offset, *(offset + 1));
11843 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11845 struct bnx2x *bp = netdev_priv(dev);
11848 if (!netif_running(dev))
11857 for (i = 0; i < (data * 2); i++) {
11859 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11862 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11864 msleep_interruptible(500);
11865 if (signal_pending(current))
11869 if (bp->link_vars.link_up)
11870 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11871 bp->link_vars.line_speed);
11876 static const struct ethtool_ops bnx2x_ethtool_ops = {
11877 .get_settings = bnx2x_get_settings,
11878 .set_settings = bnx2x_set_settings,
11879 .get_drvinfo = bnx2x_get_drvinfo,
11880 .get_regs_len = bnx2x_get_regs_len,
11881 .get_regs = bnx2x_get_regs,
11882 .get_wol = bnx2x_get_wol,
11883 .set_wol = bnx2x_set_wol,
11884 .get_msglevel = bnx2x_get_msglevel,
11885 .set_msglevel = bnx2x_set_msglevel,
11886 .nway_reset = bnx2x_nway_reset,
11887 .get_link = bnx2x_get_link,
11888 .get_eeprom_len = bnx2x_get_eeprom_len,
11889 .get_eeprom = bnx2x_get_eeprom,
11890 .set_eeprom = bnx2x_set_eeprom,
11891 .get_coalesce = bnx2x_get_coalesce,
11892 .set_coalesce = bnx2x_set_coalesce,
11893 .get_ringparam = bnx2x_get_ringparam,
11894 .set_ringparam = bnx2x_set_ringparam,
11895 .get_pauseparam = bnx2x_get_pauseparam,
11896 .set_pauseparam = bnx2x_set_pauseparam,
11897 .get_rx_csum = bnx2x_get_rx_csum,
11898 .set_rx_csum = bnx2x_set_rx_csum,
11899 .get_tx_csum = ethtool_op_get_tx_csum,
11900 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11901 .set_flags = bnx2x_set_flags,
11902 .get_flags = ethtool_op_get_flags,
11903 .get_sg = ethtool_op_get_sg,
11904 .set_sg = ethtool_op_set_sg,
11905 .get_tso = ethtool_op_get_tso,
11906 .set_tso = bnx2x_set_tso,
11907 .self_test = bnx2x_self_test,
11908 .get_sset_count = bnx2x_get_sset_count,
11909 .get_strings = bnx2x_get_strings,
11910 .phys_id = bnx2x_phys_id,
11911 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11914 /* end of ethtool_ops */
11916 /****************************************************************************
11917 * General service functions
11918 ****************************************************************************/
11920 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11924 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11928 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11929 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11930 PCI_PM_CTRL_PME_STATUS));
11932 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11933 /* delay required during transition out of D3hot */
11938 /* If there are other clients above don't
11939 shut down the power */
11940 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11942 /* Don't shut down the power for emulation and FPGA */
11943 if (CHIP_REV_IS_SLOW(bp))
11946 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11950 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11952 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11955 /* No more memory access after this point until
11956 * device is brought back to D0.
11966 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11970 /* Tell compiler that status block fields can change */
11972 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11973 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11975 return (fp->rx_comp_cons != rx_cons_sb);
11979 * net_device service functions
11982 static int bnx2x_poll(struct napi_struct *napi, int budget)
11985 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11987 struct bnx2x *bp = fp->bp;
11990 #ifdef BNX2X_STOP_ON_ERROR
11991 if (unlikely(bp->panic)) {
11992 napi_complete(napi);
11997 if (bnx2x_has_tx_work(fp))
12000 if (bnx2x_has_rx_work(fp)) {
12001 work_done += bnx2x_rx_int(fp, budget - work_done);
12003 /* must not complete if we consumed full budget */
12004 if (work_done >= budget)
12008 /* Fall out from the NAPI loop if needed */
12009 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12010 bnx2x_update_fpsb_idx(fp);
12011 /* bnx2x_has_rx_work() reads the status block, thus we need
12012 * to ensure that status block indices have been actually read
12013 * (bnx2x_update_fpsb_idx) prior to this check
12014 * (bnx2x_has_rx_work) so that we won't write the "newer"
12015 * value of the status block to IGU (if there was a DMA right
12016 * after bnx2x_has_rx_work and if there is no rmb, the memory
12017 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12018 * before bnx2x_ack_sb). In this case there will never be
12019 * another interrupt until there is another update of the
12020 * status block, while there is still unhandled work.
12024 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12025 napi_complete(napi);
12026 /* Re-enable interrupts */
12027 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12028 le16_to_cpu(fp->fp_c_idx),
12030 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12031 le16_to_cpu(fp->fp_u_idx),
12032 IGU_INT_ENABLE, 1);
12042 /* we split the first BD into headers and data BDs
12043 * to ease the pain of our fellow microcode engineers
12044 * we use one mapping for both BDs
12045 * So far this has only been observed to happen
12046 * in Other Operating Systems(TM)
12048 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12049 struct bnx2x_fastpath *fp,
12050 struct sw_tx_bd *tx_buf,
12051 struct eth_tx_start_bd **tx_bd, u16 hlen,
12052 u16 bd_prod, int nbd)
12054 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12055 struct eth_tx_bd *d_tx_bd;
12056 dma_addr_t mapping;
12057 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12059 /* first fix first BD */
12060 h_tx_bd->nbd = cpu_to_le16(nbd);
12061 h_tx_bd->nbytes = cpu_to_le16(hlen);
12063 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12064 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12065 h_tx_bd->addr_lo, h_tx_bd->nbd);
12067 /* now get a new data BD
12068 * (after the pbd) and fill it */
12069 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12070 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12072 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12073 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12075 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12076 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12077 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12079 /* this marks the BD as one that has no individual mapping */
12080 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12082 DP(NETIF_MSG_TX_QUEUED,
12083 "TSO split data size is %d (%x:%x)\n",
12084 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12087 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12092 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12095 csum = (u16) ~csum_fold(csum_sub(csum,
12096 csum_partial(t_header - fix, fix, 0)));
12099 csum = (u16) ~csum_fold(csum_add(csum,
12100 csum_partial(t_header, -fix, 0)));
12102 return swab16(csum);
12105 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12109 if (skb->ip_summed != CHECKSUM_PARTIAL)
12113 if (skb->protocol == htons(ETH_P_IPV6)) {
12115 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12116 rc |= XMIT_CSUM_TCP;
12120 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12121 rc |= XMIT_CSUM_TCP;
12125 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12126 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12128 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12129 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12134 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12135 /* check if packet requires linearization (packet is too fragmented)
12136 no need to check fragmentation if page size > 8K (there will be no
12137 violation to FW restrictions) */
12138 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12143 int first_bd_sz = 0;
12145 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12146 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12148 if (xmit_type & XMIT_GSO) {
12149 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12150 /* Check if LSO packet needs to be copied:
12151 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12152 int wnd_size = MAX_FETCH_BD - 3;
12153 /* Number of windows to check */
12154 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12159 /* Headers length */
12160 hlen = (int)(skb_transport_header(skb) - skb->data) +
12163 /* Amount of data (w/o headers) on linear part of SKB*/
12164 first_bd_sz = skb_headlen(skb) - hlen;
12166 wnd_sum = first_bd_sz;
12168 /* Calculate the first sum - it's special */
12169 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12171 skb_shinfo(skb)->frags[frag_idx].size;
12173 /* If there was data on linear skb data - check it */
12174 if (first_bd_sz > 0) {
12175 if (unlikely(wnd_sum < lso_mss)) {
12180 wnd_sum -= first_bd_sz;
12183 /* Others are easier: run through the frag list and
12184 check all windows */
12185 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12187 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12189 if (unlikely(wnd_sum < lso_mss)) {
12194 skb_shinfo(skb)->frags[wnd_idx].size;
12197 /* in non-LSO too fragmented packet should always
12204 if (unlikely(to_copy))
12205 DP(NETIF_MSG_TX_QUEUED,
12206 "Linearization IS REQUIRED for %s packet. "
12207 "num_frags %d hlen %d first_bd_sz %d\n",
12208 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12209 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12215 /* called with netif_tx_lock
12216 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12217 * netif_wake_queue()
12219 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12221 struct bnx2x *bp = netdev_priv(dev);
12222 struct bnx2x_fastpath *fp;
12223 struct netdev_queue *txq;
12224 struct sw_tx_bd *tx_buf;
12225 struct eth_tx_start_bd *tx_start_bd;
12226 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12227 struct eth_tx_parse_bd *pbd = NULL;
12228 u16 pkt_prod, bd_prod;
12230 dma_addr_t mapping;
12231 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12234 __le16 pkt_size = 0;
12235 struct ethhdr *eth;
12236 u8 mac_type = UNICAST_ADDRESS;
12238 #ifdef BNX2X_STOP_ON_ERROR
12239 if (unlikely(bp->panic))
12240 return NETDEV_TX_BUSY;
12243 fp_index = skb_get_queue_mapping(skb);
12244 txq = netdev_get_tx_queue(dev, fp_index);
12246 fp = &bp->fp[fp_index];
12248 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12249 fp->eth_q_stats.driver_xoff++;
12250 netif_tx_stop_queue(txq);
12251 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12252 return NETDEV_TX_BUSY;
12255 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12256 " gso type %x xmit_type %x\n",
12257 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12258 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12260 eth = (struct ethhdr *)skb->data;
12262 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12263 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12264 if (is_broadcast_ether_addr(eth->h_dest))
12265 mac_type = BROADCAST_ADDRESS;
12267 mac_type = MULTICAST_ADDRESS;
12270 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12271 /* First, check if we need to linearize the skb (due to FW
12272 restrictions). No need to check fragmentation if page size > 8K
12273 (there will be no violation to FW restrictions) */
12274 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12275 /* Statistics of linearization */
12277 if (skb_linearize(skb) != 0) {
12278 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12279 "silently dropping this SKB\n");
12280 dev_kfree_skb_any(skb);
12281 return NETDEV_TX_OK;
12287 Please read carefully. First we use one BD which we mark as start,
12288 then we have a parsing info BD (used for TSO or xsum),
12289 and only then we have the rest of the TSO BDs.
12290 (don't forget to mark the last one as last,
12291 and to unmap only AFTER you write to the BD ...)
12292 And above all, all pdb sizes are in words - NOT DWORDS!
12295 pkt_prod = fp->tx_pkt_prod++;
12296 bd_prod = TX_BD(fp->tx_bd_prod);
12298 /* get a tx_buf and first BD */
12299 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12300 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12302 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12303 tx_start_bd->general_data = (mac_type <<
12304 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12306 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12308 /* remember the first BD of the packet */
12309 tx_buf->first_bd = fp->tx_bd_prod;
12313 DP(NETIF_MSG_TX_QUEUED,
12314 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12315 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12318 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12319 (bp->flags & HW_VLAN_TX_FLAG)) {
12320 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12321 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12324 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12326 /* turn on parsing and get a BD */
12327 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12328 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12330 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12332 if (xmit_type & XMIT_CSUM) {
12333 hlen = (skb_network_header(skb) - skb->data) / 2;
12335 /* for now NS flag is not used in Linux */
12337 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12338 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12340 pbd->ip_hlen = (skb_transport_header(skb) -
12341 skb_network_header(skb)) / 2;
12343 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12345 pbd->total_hlen = cpu_to_le16(hlen);
12348 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12350 if (xmit_type & XMIT_CSUM_V4)
12351 tx_start_bd->bd_flags.as_bitfield |=
12352 ETH_TX_BD_FLAGS_IP_CSUM;
12354 tx_start_bd->bd_flags.as_bitfield |=
12355 ETH_TX_BD_FLAGS_IPV6;
12357 if (xmit_type & XMIT_CSUM_TCP) {
12358 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12361 s8 fix = SKB_CS_OFF(skb); /* signed! */
12363 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12365 DP(NETIF_MSG_TX_QUEUED,
12366 "hlen %d fix %d csum before fix %x\n",
12367 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12369 /* HW bug: fixup the CSUM */
12370 pbd->tcp_pseudo_csum =
12371 bnx2x_csum_fix(skb_transport_header(skb),
12374 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12375 pbd->tcp_pseudo_csum);
12379 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12380 skb_headlen(skb), DMA_TO_DEVICE);
12382 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12383 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12384 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12385 tx_start_bd->nbd = cpu_to_le16(nbd);
12386 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12387 pkt_size = tx_start_bd->nbytes;
12389 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12390 " nbytes %d flags %x vlan %x\n",
12391 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12392 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12393 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12395 if (xmit_type & XMIT_GSO) {
12397 DP(NETIF_MSG_TX_QUEUED,
12398 "TSO packet len %d hlen %d total len %d tso size %d\n",
12399 skb->len, hlen, skb_headlen(skb),
12400 skb_shinfo(skb)->gso_size);
12402 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12404 if (unlikely(skb_headlen(skb) > hlen))
12405 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12406 hlen, bd_prod, ++nbd);
12408 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12409 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12410 pbd->tcp_flags = pbd_tcp_flags(skb);
12412 if (xmit_type & XMIT_GSO_V4) {
12413 pbd->ip_id = swab16(ip_hdr(skb)->id);
12414 pbd->tcp_pseudo_csum =
12415 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12416 ip_hdr(skb)->daddr,
12417 0, IPPROTO_TCP, 0));
12420 pbd->tcp_pseudo_csum =
12421 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12422 &ipv6_hdr(skb)->daddr,
12423 0, IPPROTO_TCP, 0));
12425 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12427 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12429 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12430 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12432 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12433 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12434 if (total_pkt_bd == NULL)
12435 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12437 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12439 frag->size, DMA_TO_DEVICE);
12441 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12442 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12443 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12444 le16_add_cpu(&pkt_size, frag->size);
12446 DP(NETIF_MSG_TX_QUEUED,
12447 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12448 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12449 le16_to_cpu(tx_data_bd->nbytes));
12452 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12454 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12456 /* now send a tx doorbell, counting the next BD
12457 * if the packet contains or ends with it
12459 if (TX_BD_POFF(bd_prod) < nbd)
12462 if (total_pkt_bd != NULL)
12463 total_pkt_bd->total_pkt_bytes = pkt_size;
12466 DP(NETIF_MSG_TX_QUEUED,
12467 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12468 " tcp_flags %x xsum %x seq %u hlen %u\n",
12469 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12470 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12471 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12473 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12476 * Make sure that the BD data is updated before updating the producer
12477 * since FW might read the BD right after the producer is updated.
12478 * This is only applicable for weak-ordered memory model archs such
12479 * as IA-64. The following barrier is also mandatory since FW will
12480 * assumes packets must have BDs.
12484 fp->tx_db.data.prod += nbd;
12486 DOORBELL(bp, fp->index, fp->tx_db.raw);
12490 fp->tx_bd_prod += nbd;
12492 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12493 netif_tx_stop_queue(txq);
12495 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12496 * ordering of set_bit() in netif_tx_stop_queue() and read of
12497 * fp->bd_tx_cons */
12500 fp->eth_q_stats.driver_xoff++;
12501 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12502 netif_tx_wake_queue(txq);
12506 return NETDEV_TX_OK;
12509 /* called with rtnl_lock */
12510 static int bnx2x_open(struct net_device *dev)
12512 struct bnx2x *bp = netdev_priv(dev);
12514 netif_carrier_off(dev);
12516 bnx2x_set_power_state(bp, PCI_D0);
12518 if (!bnx2x_reset_is_done(bp)) {
12520 /* Reset MCP mail box sequence if there is on going
12525 /* If it's the first function to load and reset done
12526 * is still not cleared it may mean that. We don't
12527 * check the attention state here because it may have
12528 * already been cleared by a "common" reset but we
12529 * shell proceed with "process kill" anyway.
12531 if ((bnx2x_get_load_cnt(bp) == 0) &&
12532 bnx2x_trylock_hw_lock(bp,
12533 HW_LOCK_RESOURCE_RESERVED_08) &&
12534 (!bnx2x_leader_reset(bp))) {
12535 DP(NETIF_MSG_HW, "Recovered in open\n");
12539 bnx2x_set_power_state(bp, PCI_D3hot);
12541 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12542 " completed yet. Try again later. If u still see this"
12543 " message after a few retries then power cycle is"
12544 " required.\n", bp->dev->name);
12550 bp->recovery_state = BNX2X_RECOVERY_DONE;
12552 return bnx2x_nic_load(bp, LOAD_OPEN);
12555 /* called with rtnl_lock */
12556 static int bnx2x_close(struct net_device *dev)
12558 struct bnx2x *bp = netdev_priv(dev);
12560 /* Unload the driver, release IRQs */
12561 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12562 bnx2x_set_power_state(bp, PCI_D3hot);
12567 /* called with netif_tx_lock from dev_mcast.c */
12568 static void bnx2x_set_rx_mode(struct net_device *dev)
12570 struct bnx2x *bp = netdev_priv(dev);
12571 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12572 int port = BP_PORT(bp);
12574 if (bp->state != BNX2X_STATE_OPEN) {
12575 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12579 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12581 if (dev->flags & IFF_PROMISC)
12582 rx_mode = BNX2X_RX_MODE_PROMISC;
12584 else if ((dev->flags & IFF_ALLMULTI) ||
12585 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12587 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12589 else { /* some multicasts */
12590 if (CHIP_IS_E1(bp)) {
12591 int i, old, offset;
12592 struct netdev_hw_addr *ha;
12593 struct mac_configuration_cmd *config =
12594 bnx2x_sp(bp, mcast_config);
12597 netdev_for_each_mc_addr(ha, dev) {
12598 config->config_table[i].
12599 cam_entry.msb_mac_addr =
12600 swab16(*(u16 *)&ha->addr[0]);
12601 config->config_table[i].
12602 cam_entry.middle_mac_addr =
12603 swab16(*(u16 *)&ha->addr[2]);
12604 config->config_table[i].
12605 cam_entry.lsb_mac_addr =
12606 swab16(*(u16 *)&ha->addr[4]);
12607 config->config_table[i].cam_entry.flags =
12609 config->config_table[i].
12610 target_table_entry.flags = 0;
12611 config->config_table[i].target_table_entry.
12612 clients_bit_vector =
12613 cpu_to_le32(1 << BP_L_ID(bp));
12614 config->config_table[i].
12615 target_table_entry.vlan_id = 0;
12618 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12619 config->config_table[i].
12620 cam_entry.msb_mac_addr,
12621 config->config_table[i].
12622 cam_entry.middle_mac_addr,
12623 config->config_table[i].
12624 cam_entry.lsb_mac_addr);
12627 old = config->hdr.length;
12629 for (; i < old; i++) {
12630 if (CAM_IS_INVALID(config->
12631 config_table[i])) {
12632 /* already invalidated */
12636 CAM_INVALIDATE(config->
12641 if (CHIP_REV_IS_SLOW(bp))
12642 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12644 offset = BNX2X_MAX_MULTICAST*(1 + port);
12646 config->hdr.length = i;
12647 config->hdr.offset = offset;
12648 config->hdr.client_id = bp->fp->cl_id;
12649 config->hdr.reserved1 = 0;
12651 bp->set_mac_pending++;
12654 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12655 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12656 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12659 /* Accept one or more multicasts */
12660 struct netdev_hw_addr *ha;
12661 u32 mc_filter[MC_HASH_SIZE];
12662 u32 crc, bit, regidx;
12665 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12667 netdev_for_each_mc_addr(ha, dev) {
12668 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12671 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12672 bit = (crc >> 24) & 0xff;
12675 mc_filter[regidx] |= (1 << bit);
12678 for (i = 0; i < MC_HASH_SIZE; i++)
12679 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12684 bp->rx_mode = rx_mode;
12685 bnx2x_set_storm_rx_mode(bp);
12688 /* called with rtnl_lock */
12689 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12691 struct sockaddr *addr = p;
12692 struct bnx2x *bp = netdev_priv(dev);
12694 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12697 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12698 if (netif_running(dev)) {
12699 if (CHIP_IS_E1(bp))
12700 bnx2x_set_eth_mac_addr_e1(bp, 1);
12702 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12708 /* called with rtnl_lock */
12709 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12710 int devad, u16 addr)
12712 struct bnx2x *bp = netdev_priv(netdev);
12715 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12717 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12718 prtad, devad, addr);
12720 if (prtad != bp->mdio.prtad) {
12721 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12722 prtad, bp->mdio.prtad);
12726 /* The HW expects different devad if CL22 is used */
12727 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12729 bnx2x_acquire_phy_lock(bp);
12730 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12731 devad, addr, &value);
12732 bnx2x_release_phy_lock(bp);
12733 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12740 /* called with rtnl_lock */
12741 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12742 u16 addr, u16 value)
12744 struct bnx2x *bp = netdev_priv(netdev);
12745 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12748 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12749 " value 0x%x\n", prtad, devad, addr, value);
12751 if (prtad != bp->mdio.prtad) {
12752 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12753 prtad, bp->mdio.prtad);
12757 /* The HW expects different devad if CL22 is used */
12758 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12760 bnx2x_acquire_phy_lock(bp);
12761 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12762 devad, addr, value);
12763 bnx2x_release_phy_lock(bp);
12767 /* called with rtnl_lock */
12768 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12770 struct bnx2x *bp = netdev_priv(dev);
12771 struct mii_ioctl_data *mdio = if_mii(ifr);
12773 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12774 mdio->phy_id, mdio->reg_num, mdio->val_in);
12776 if (!netif_running(dev))
12779 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12782 /* called with rtnl_lock */
12783 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12785 struct bnx2x *bp = netdev_priv(dev);
12788 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12789 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12793 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12794 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12797 /* This does not race with packet allocation
12798 * because the actual alloc size is
12799 * only updated as part of load
12801 dev->mtu = new_mtu;
12803 if (netif_running(dev)) {
12804 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12805 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12811 static void bnx2x_tx_timeout(struct net_device *dev)
12813 struct bnx2x *bp = netdev_priv(dev);
12815 #ifdef BNX2X_STOP_ON_ERROR
12819 /* This allows the netif to be shutdown gracefully before resetting */
12820 schedule_delayed_work(&bp->reset_task, 0);
12824 /* called with rtnl_lock */
12825 static void bnx2x_vlan_rx_register(struct net_device *dev,
12826 struct vlan_group *vlgrp)
12828 struct bnx2x *bp = netdev_priv(dev);
12832 /* Set flags according to the required capabilities */
12833 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12835 if (dev->features & NETIF_F_HW_VLAN_TX)
12836 bp->flags |= HW_VLAN_TX_FLAG;
12838 if (dev->features & NETIF_F_HW_VLAN_RX)
12839 bp->flags |= HW_VLAN_RX_FLAG;
12841 if (netif_running(dev))
12842 bnx2x_set_client_config(bp);
12847 #ifdef CONFIG_NET_POLL_CONTROLLER
12848 static void poll_bnx2x(struct net_device *dev)
12850 struct bnx2x *bp = netdev_priv(dev);
12852 disable_irq(bp->pdev->irq);
12853 bnx2x_interrupt(bp->pdev->irq, dev);
12854 enable_irq(bp->pdev->irq);
12858 static const struct net_device_ops bnx2x_netdev_ops = {
12859 .ndo_open = bnx2x_open,
12860 .ndo_stop = bnx2x_close,
12861 .ndo_start_xmit = bnx2x_start_xmit,
12862 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12863 .ndo_set_mac_address = bnx2x_change_mac_addr,
12864 .ndo_validate_addr = eth_validate_addr,
12865 .ndo_do_ioctl = bnx2x_ioctl,
12866 .ndo_change_mtu = bnx2x_change_mtu,
12867 .ndo_tx_timeout = bnx2x_tx_timeout,
12869 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12871 #ifdef CONFIG_NET_POLL_CONTROLLER
12872 .ndo_poll_controller = poll_bnx2x,
12876 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12877 struct net_device *dev)
12882 SET_NETDEV_DEV(dev, &pdev->dev);
12883 bp = netdev_priv(dev);
12888 bp->func = PCI_FUNC(pdev->devfn);
12890 rc = pci_enable_device(pdev);
12892 dev_err(&bp->pdev->dev,
12893 "Cannot enable PCI device, aborting\n");
12897 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12898 dev_err(&bp->pdev->dev,
12899 "Cannot find PCI device base address, aborting\n");
12901 goto err_out_disable;
12904 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12905 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12906 " base address, aborting\n");
12908 goto err_out_disable;
12911 if (atomic_read(&pdev->enable_cnt) == 1) {
12912 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12914 dev_err(&bp->pdev->dev,
12915 "Cannot obtain PCI resources, aborting\n");
12916 goto err_out_disable;
12919 pci_set_master(pdev);
12920 pci_save_state(pdev);
12923 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12924 if (bp->pm_cap == 0) {
12925 dev_err(&bp->pdev->dev,
12926 "Cannot find power management capability, aborting\n");
12928 goto err_out_release;
12931 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12932 if (bp->pcie_cap == 0) {
12933 dev_err(&bp->pdev->dev,
12934 "Cannot find PCI Express capability, aborting\n");
12936 goto err_out_release;
12939 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12940 bp->flags |= USING_DAC_FLAG;
12941 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12942 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12943 " failed, aborting\n");
12945 goto err_out_release;
12948 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12949 dev_err(&bp->pdev->dev,
12950 "System does not support DMA, aborting\n");
12952 goto err_out_release;
12955 dev->mem_start = pci_resource_start(pdev, 0);
12956 dev->base_addr = dev->mem_start;
12957 dev->mem_end = pci_resource_end(pdev, 0);
12959 dev->irq = pdev->irq;
12961 bp->regview = pci_ioremap_bar(pdev, 0);
12962 if (!bp->regview) {
12963 dev_err(&bp->pdev->dev,
12964 "Cannot map register space, aborting\n");
12966 goto err_out_release;
12969 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12970 min_t(u64, BNX2X_DB_SIZE,
12971 pci_resource_len(pdev, 2)));
12972 if (!bp->doorbells) {
12973 dev_err(&bp->pdev->dev,
12974 "Cannot map doorbell space, aborting\n");
12976 goto err_out_unmap;
12979 bnx2x_set_power_state(bp, PCI_D0);
12981 /* clean indirect addresses */
12982 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12983 PCICFG_VENDOR_ID_OFFSET);
12984 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12985 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12986 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12987 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12989 /* Reset the load counter */
12990 bnx2x_clear_load_cnt(bp);
12992 dev->watchdog_timeo = TX_TIMEOUT;
12994 dev->netdev_ops = &bnx2x_netdev_ops;
12995 dev->ethtool_ops = &bnx2x_ethtool_ops;
12996 dev->features |= NETIF_F_SG;
12997 dev->features |= NETIF_F_HW_CSUM;
12998 if (bp->flags & USING_DAC_FLAG)
12999 dev->features |= NETIF_F_HIGHDMA;
13000 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13001 dev->features |= NETIF_F_TSO6;
13003 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13004 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13006 dev->vlan_features |= NETIF_F_SG;
13007 dev->vlan_features |= NETIF_F_HW_CSUM;
13008 if (bp->flags & USING_DAC_FLAG)
13009 dev->vlan_features |= NETIF_F_HIGHDMA;
13010 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13011 dev->vlan_features |= NETIF_F_TSO6;
13014 /* get_port_hwinfo() will set prtad and mmds properly */
13015 bp->mdio.prtad = MDIO_PRTAD_NONE;
13017 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13018 bp->mdio.dev = dev;
13019 bp->mdio.mdio_read = bnx2x_mdio_read;
13020 bp->mdio.mdio_write = bnx2x_mdio_write;
13026 iounmap(bp->regview);
13027 bp->regview = NULL;
13029 if (bp->doorbells) {
13030 iounmap(bp->doorbells);
13031 bp->doorbells = NULL;
13035 if (atomic_read(&pdev->enable_cnt) == 1)
13036 pci_release_regions(pdev);
13039 pci_disable_device(pdev);
13040 pci_set_drvdata(pdev, NULL);
13046 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13047 int *width, int *speed)
13049 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13051 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13053 /* return value of 1=2.5GHz 2=5GHz */
13054 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13057 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13059 const struct firmware *firmware = bp->firmware;
13060 struct bnx2x_fw_file_hdr *fw_hdr;
13061 struct bnx2x_fw_file_section *sections;
13062 u32 offset, len, num_ops;
13067 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13070 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13071 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13073 /* Make sure none of the offsets and sizes make us read beyond
13074 * the end of the firmware data */
13075 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13076 offset = be32_to_cpu(sections[i].offset);
13077 len = be32_to_cpu(sections[i].len);
13078 if (offset + len > firmware->size) {
13079 dev_err(&bp->pdev->dev,
13080 "Section %d length is out of bounds\n", i);
13085 /* Likewise for the init_ops offsets */
13086 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13087 ops_offsets = (u16 *)(firmware->data + offset);
13088 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13090 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13091 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13092 dev_err(&bp->pdev->dev,
13093 "Section offset %d is out of bounds\n", i);
13098 /* Check FW version */
13099 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13100 fw_ver = firmware->data + offset;
13101 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13102 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13103 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13104 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13105 dev_err(&bp->pdev->dev,
13106 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13107 fw_ver[0], fw_ver[1], fw_ver[2],
13108 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13109 BCM_5710_FW_MINOR_VERSION,
13110 BCM_5710_FW_REVISION_VERSION,
13111 BCM_5710_FW_ENGINEERING_VERSION);
13118 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13120 const __be32 *source = (const __be32 *)_source;
13121 u32 *target = (u32 *)_target;
13124 for (i = 0; i < n/4; i++)
13125 target[i] = be32_to_cpu(source[i]);
13129 Ops array is stored in the following format:
13130 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13132 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13134 const __be32 *source = (const __be32 *)_source;
13135 struct raw_op *target = (struct raw_op *)_target;
13138 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13139 tmp = be32_to_cpu(source[j]);
13140 target[i].op = (tmp >> 24) & 0xff;
13141 target[i].offset = tmp & 0xffffff;
13142 target[i].raw_data = be32_to_cpu(source[j + 1]);
13146 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13148 const __be16 *source = (const __be16 *)_source;
13149 u16 *target = (u16 *)_target;
13152 for (i = 0; i < n/2; i++)
13153 target[i] = be16_to_cpu(source[i]);
13156 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13158 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13159 bp->arr = kmalloc(len, GFP_KERNEL); \
13161 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13164 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13165 (u8 *)bp->arr, len); \
13168 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13170 const char *fw_file_name;
13171 struct bnx2x_fw_file_hdr *fw_hdr;
13174 if (CHIP_IS_E1(bp))
13175 fw_file_name = FW_FILE_NAME_E1;
13176 else if (CHIP_IS_E1H(bp))
13177 fw_file_name = FW_FILE_NAME_E1H;
13179 dev_err(dev, "Unsupported chip revision\n");
13183 dev_info(dev, "Loading %s\n", fw_file_name);
13185 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13187 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13188 goto request_firmware_exit;
13191 rc = bnx2x_check_firmware(bp);
13193 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13194 goto request_firmware_exit;
13197 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13199 /* Initialize the pointers to the init arrays */
13201 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13204 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13207 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13210 /* STORMs firmware */
13211 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13212 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13213 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13214 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13215 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13216 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13217 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13218 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13219 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13220 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13221 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13222 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13223 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13224 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13225 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13226 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13230 init_offsets_alloc_err:
13231 kfree(bp->init_ops);
13232 init_ops_alloc_err:
13233 kfree(bp->init_data);
13234 request_firmware_exit:
13235 release_firmware(bp->firmware);
13241 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13242 const struct pci_device_id *ent)
13244 struct net_device *dev = NULL;
13246 int pcie_width, pcie_speed;
13249 /* dev zeroed in init_etherdev */
13250 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13252 dev_err(&pdev->dev, "Cannot allocate net device\n");
13256 bp = netdev_priv(dev);
13257 bp->msg_enable = debug;
13259 pci_set_drvdata(pdev, dev);
13261 rc = bnx2x_init_dev(pdev, dev);
13267 rc = bnx2x_init_bp(bp);
13269 goto init_one_exit;
13271 /* Set init arrays */
13272 rc = bnx2x_init_firmware(bp, &pdev->dev);
13274 dev_err(&pdev->dev, "Error loading firmware\n");
13275 goto init_one_exit;
13278 rc = register_netdev(dev);
13280 dev_err(&pdev->dev, "Cannot register net device\n");
13281 goto init_one_exit;
13284 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13285 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13286 " IRQ %d, ", board_info[ent->driver_data].name,
13287 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13288 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13289 dev->base_addr, bp->pdev->irq);
13290 pr_cont("node addr %pM\n", dev->dev_addr);
13296 iounmap(bp->regview);
13299 iounmap(bp->doorbells);
13303 if (atomic_read(&pdev->enable_cnt) == 1)
13304 pci_release_regions(pdev);
13306 pci_disable_device(pdev);
13307 pci_set_drvdata(pdev, NULL);
13312 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13314 struct net_device *dev = pci_get_drvdata(pdev);
13318 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13321 bp = netdev_priv(dev);
13323 unregister_netdev(dev);
13325 /* Make sure RESET task is not scheduled before continuing */
13326 cancel_delayed_work_sync(&bp->reset_task);
13328 kfree(bp->init_ops_offsets);
13329 kfree(bp->init_ops);
13330 kfree(bp->init_data);
13331 release_firmware(bp->firmware);
13334 iounmap(bp->regview);
13337 iounmap(bp->doorbells);
13341 if (atomic_read(&pdev->enable_cnt) == 1)
13342 pci_release_regions(pdev);
13344 pci_disable_device(pdev);
13345 pci_set_drvdata(pdev, NULL);
13348 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13350 struct net_device *dev = pci_get_drvdata(pdev);
13354 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13357 bp = netdev_priv(dev);
13361 pci_save_state(pdev);
13363 if (!netif_running(dev)) {
13368 netif_device_detach(dev);
13370 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13372 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13379 static int bnx2x_resume(struct pci_dev *pdev)
13381 struct net_device *dev = pci_get_drvdata(pdev);
13386 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13389 bp = netdev_priv(dev);
13391 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13392 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13398 pci_restore_state(pdev);
13400 if (!netif_running(dev)) {
13405 bnx2x_set_power_state(bp, PCI_D0);
13406 netif_device_attach(dev);
13408 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13415 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13419 bp->state = BNX2X_STATE_ERROR;
13421 bp->rx_mode = BNX2X_RX_MODE_NONE;
13423 bnx2x_netif_stop(bp, 0);
13425 del_timer_sync(&bp->timer);
13426 bp->stats_state = STATS_STATE_DISABLED;
13427 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13430 bnx2x_free_irq(bp, false);
13432 if (CHIP_IS_E1(bp)) {
13433 struct mac_configuration_cmd *config =
13434 bnx2x_sp(bp, mcast_config);
13436 for (i = 0; i < config->hdr.length; i++)
13437 CAM_INVALIDATE(config->config_table[i]);
13440 /* Free SKBs, SGEs, TPA pool and driver internals */
13441 bnx2x_free_skbs(bp);
13442 for_each_queue(bp, i)
13443 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13444 for_each_queue(bp, i)
13445 netif_napi_del(&bnx2x_fp(bp, i, napi));
13446 bnx2x_free_mem(bp);
13448 bp->state = BNX2X_STATE_CLOSED;
13450 netif_carrier_off(bp->dev);
13455 static void bnx2x_eeh_recover(struct bnx2x *bp)
13459 mutex_init(&bp->port.phy_mutex);
13461 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13462 bp->link_params.shmem_base = bp->common.shmem_base;
13463 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13465 if (!bp->common.shmem_base ||
13466 (bp->common.shmem_base < 0xA0000) ||
13467 (bp->common.shmem_base >= 0xC0000)) {
13468 BNX2X_DEV_INFO("MCP not active\n");
13469 bp->flags |= NO_MCP_FLAG;
13473 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13474 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13475 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13476 BNX2X_ERR("BAD MCP validity signature\n");
13478 if (!BP_NOMCP(bp)) {
13479 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13480 & DRV_MSG_SEQ_NUMBER_MASK);
13481 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13486 * bnx2x_io_error_detected - called when PCI error is detected
13487 * @pdev: Pointer to PCI device
13488 * @state: The current pci connection state
13490 * This function is called after a PCI bus error affecting
13491 * this device has been detected.
13493 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13494 pci_channel_state_t state)
13496 struct net_device *dev = pci_get_drvdata(pdev);
13497 struct bnx2x *bp = netdev_priv(dev);
13501 netif_device_detach(dev);
13503 if (state == pci_channel_io_perm_failure) {
13505 return PCI_ERS_RESULT_DISCONNECT;
13508 if (netif_running(dev))
13509 bnx2x_eeh_nic_unload(bp);
13511 pci_disable_device(pdev);
13515 /* Request a slot reset */
13516 return PCI_ERS_RESULT_NEED_RESET;
13520 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13521 * @pdev: Pointer to PCI device
13523 * Restart the card from scratch, as if from a cold-boot.
13525 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13527 struct net_device *dev = pci_get_drvdata(pdev);
13528 struct bnx2x *bp = netdev_priv(dev);
13532 if (pci_enable_device(pdev)) {
13533 dev_err(&pdev->dev,
13534 "Cannot re-enable PCI device after reset\n");
13536 return PCI_ERS_RESULT_DISCONNECT;
13539 pci_set_master(pdev);
13540 pci_restore_state(pdev);
13542 if (netif_running(dev))
13543 bnx2x_set_power_state(bp, PCI_D0);
13547 return PCI_ERS_RESULT_RECOVERED;
13551 * bnx2x_io_resume - called when traffic can start flowing again
13552 * @pdev: Pointer to PCI device
13554 * This callback is called when the error recovery driver tells us that
13555 * its OK to resume normal operation.
13557 static void bnx2x_io_resume(struct pci_dev *pdev)
13559 struct net_device *dev = pci_get_drvdata(pdev);
13560 struct bnx2x *bp = netdev_priv(dev);
13562 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13563 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13569 bnx2x_eeh_recover(bp);
13571 if (netif_running(dev))
13572 bnx2x_nic_load(bp, LOAD_NORMAL);
13574 netif_device_attach(dev);
13579 static struct pci_error_handlers bnx2x_err_handler = {
13580 .error_detected = bnx2x_io_error_detected,
13581 .slot_reset = bnx2x_io_slot_reset,
13582 .resume = bnx2x_io_resume,
13585 static struct pci_driver bnx2x_pci_driver = {
13586 .name = DRV_MODULE_NAME,
13587 .id_table = bnx2x_pci_tbl,
13588 .probe = bnx2x_init_one,
13589 .remove = __devexit_p(bnx2x_remove_one),
13590 .suspend = bnx2x_suspend,
13591 .resume = bnx2x_resume,
13592 .err_handler = &bnx2x_err_handler,
13595 static int __init bnx2x_init(void)
13599 pr_info("%s", version);
13601 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13602 if (bnx2x_wq == NULL) {
13603 pr_err("Cannot create workqueue\n");
13607 ret = pci_register_driver(&bnx2x_pci_driver);
13609 pr_err("Cannot register driver\n");
13610 destroy_workqueue(bnx2x_wq);
13615 static void __exit bnx2x_cleanup(void)
13617 pci_unregister_driver(&bnx2x_pci_driver);
13619 destroy_workqueue(bnx2x_wq);
13622 module_init(bnx2x_init);
13623 module_exit(bnx2x_cleanup);
13627 /* count denotes the number of new completions we have seen */
13628 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13630 struct eth_spe *spe;
13632 #ifdef BNX2X_STOP_ON_ERROR
13633 if (unlikely(bp->panic))
13637 spin_lock_bh(&bp->spq_lock);
13638 bp->cnic_spq_pending -= count;
13640 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13641 bp->cnic_spq_pending++) {
13643 if (!bp->cnic_kwq_pending)
13646 spe = bnx2x_sp_get_next(bp);
13647 *spe = *bp->cnic_kwq_cons;
13649 bp->cnic_kwq_pending--;
13651 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13652 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13654 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13655 bp->cnic_kwq_cons = bp->cnic_kwq;
13657 bp->cnic_kwq_cons++;
13659 bnx2x_sp_prod_update(bp);
13660 spin_unlock_bh(&bp->spq_lock);
13663 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13664 struct kwqe_16 *kwqes[], u32 count)
13666 struct bnx2x *bp = netdev_priv(dev);
13669 #ifdef BNX2X_STOP_ON_ERROR
13670 if (unlikely(bp->panic))
13674 spin_lock_bh(&bp->spq_lock);
13676 for (i = 0; i < count; i++) {
13677 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13679 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13682 *bp->cnic_kwq_prod = *spe;
13684 bp->cnic_kwq_pending++;
13686 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13687 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13688 spe->data.mac_config_addr.hi,
13689 spe->data.mac_config_addr.lo,
13690 bp->cnic_kwq_pending);
13692 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13693 bp->cnic_kwq_prod = bp->cnic_kwq;
13695 bp->cnic_kwq_prod++;
13698 spin_unlock_bh(&bp->spq_lock);
13700 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13701 bnx2x_cnic_sp_post(bp, 0);
13706 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13708 struct cnic_ops *c_ops;
13711 mutex_lock(&bp->cnic_mutex);
13712 c_ops = bp->cnic_ops;
13714 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13715 mutex_unlock(&bp->cnic_mutex);
13720 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13722 struct cnic_ops *c_ops;
13726 c_ops = rcu_dereference(bp->cnic_ops);
13728 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13735 * for commands that have no data
13737 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13739 struct cnic_ctl_info ctl = {0};
13743 return bnx2x_cnic_ctl_send(bp, &ctl);
13746 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13748 struct cnic_ctl_info ctl;
13750 /* first we tell CNIC and only then we count this as a completion */
13751 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13752 ctl.data.comp.cid = cid;
13754 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13755 bnx2x_cnic_sp_post(bp, 1);
13758 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13760 struct bnx2x *bp = netdev_priv(dev);
13763 switch (ctl->cmd) {
13764 case DRV_CTL_CTXTBL_WR_CMD: {
13765 u32 index = ctl->data.io.offset;
13766 dma_addr_t addr = ctl->data.io.dma_addr;
13768 bnx2x_ilt_wr(bp, index, addr);
13772 case DRV_CTL_COMPLETION_CMD: {
13773 int count = ctl->data.comp.comp_count;
13775 bnx2x_cnic_sp_post(bp, count);
13779 /* rtnl_lock is held. */
13780 case DRV_CTL_START_L2_CMD: {
13781 u32 cli = ctl->data.ring.client_id;
13783 bp->rx_mode_cl_mask |= (1 << cli);
13784 bnx2x_set_storm_rx_mode(bp);
13788 /* rtnl_lock is held. */
13789 case DRV_CTL_STOP_L2_CMD: {
13790 u32 cli = ctl->data.ring.client_id;
13792 bp->rx_mode_cl_mask &= ~(1 << cli);
13793 bnx2x_set_storm_rx_mode(bp);
13798 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13805 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13807 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13809 if (bp->flags & USING_MSIX_FLAG) {
13810 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13811 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13812 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13814 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13815 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13817 cp->irq_arr[0].status_blk = bp->cnic_sb;
13818 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13819 cp->irq_arr[1].status_blk = bp->def_status_blk;
13820 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13825 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13828 struct bnx2x *bp = netdev_priv(dev);
13829 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13834 if (atomic_read(&bp->intr_sem) != 0)
13837 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13841 bp->cnic_kwq_cons = bp->cnic_kwq;
13842 bp->cnic_kwq_prod = bp->cnic_kwq;
13843 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13845 bp->cnic_spq_pending = 0;
13846 bp->cnic_kwq_pending = 0;
13848 bp->cnic_data = data;
13851 cp->drv_state = CNIC_DRV_STATE_REGD;
13853 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13855 bnx2x_setup_cnic_irq_info(bp);
13856 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13857 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13858 rcu_assign_pointer(bp->cnic_ops, ops);
13863 static int bnx2x_unregister_cnic(struct net_device *dev)
13865 struct bnx2x *bp = netdev_priv(dev);
13866 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13868 mutex_lock(&bp->cnic_mutex);
13869 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13870 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13871 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13874 rcu_assign_pointer(bp->cnic_ops, NULL);
13875 mutex_unlock(&bp->cnic_mutex);
13877 kfree(bp->cnic_kwq);
13878 bp->cnic_kwq = NULL;
13883 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13885 struct bnx2x *bp = netdev_priv(dev);
13886 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13888 cp->drv_owner = THIS_MODULE;
13889 cp->chip_id = CHIP_ID(bp);
13890 cp->pdev = bp->pdev;
13891 cp->io_base = bp->regview;
13892 cp->io_base2 = bp->doorbells;
13893 cp->max_kwqe_pending = 8;
13894 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13895 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13896 cp->ctx_tbl_len = CNIC_ILT_LINES;
13897 cp->starting_cid = BCM_CNIC_CID_START;
13898 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13899 cp->drv_ctl = bnx2x_drv_ctl;
13900 cp->drv_register_cnic = bnx2x_register_cnic;
13901 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13905 EXPORT_SYMBOL(bnx2x_cnic_probe);
13907 #endif /* BCM_CNIC */