1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
60 #define DRV_MODULE_VERSION "1.52.1-8"
61 #define DRV_MODULE_RELDATE "2010/04/01"
62 #define BNX2X_BC_VER 0x040200
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
67 #define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT (5*HZ)
78 static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
108 static int dropless_fc;
109 module_param(dropless_fc, int, 0);
110 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
113 module_param(poll, int, 0);
114 MODULE_PARM_DESC(poll, " Use polling (for debug)");
116 static int mrrs = -1;
117 module_param(mrrs, int, 0);
118 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
121 module_param(debug, int, 0);
122 MODULE_PARM_DESC(debug, " Default debug msglevel");
124 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
126 static struct workqueue_struct *bnx2x_wq;
128 enum bnx2x_board_type {
134 /* indexed by board_type, above */
137 } board_info[] __devinitdata = {
138 { "Broadcom NetXtreme II BCM57710 XGb" },
139 { "Broadcom NetXtreme II BCM57711 XGb" },
140 { "Broadcom NetXtreme II BCM57711E XGb" }
144 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
151 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
153 /****************************************************************************
154 * General service functions
155 ****************************************************************************/
158 * locking is done by mcp
160 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165 PCICFG_VENDOR_ID_OFFSET);
168 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175 PCICFG_VENDOR_ID_OFFSET);
180 static const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
187 /* copy command into DMAE command memory and set DMAE command go */
188 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
194 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
198 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
201 REG_WR(bp, dmae_reg_go_c[idx], 1);
204 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
207 struct dmae_command dmae;
208 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
211 if (!bp->dmae_ready) {
212 u32 *data = bnx2x_sp(bp, wb_data[0]);
214 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
215 " using indirect\n", dst_addr, len32);
216 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
220 memset(&dmae, 0, sizeof(struct dmae_command));
222 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
226 DMAE_CMD_ENDIANITY_B_DW_SWAP |
228 DMAE_CMD_ENDIANITY_DW_SWAP |
230 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232 dmae.src_addr_lo = U64_LO(dma_addr);
233 dmae.src_addr_hi = U64_HI(dma_addr);
234 dmae.dst_addr_lo = dst_addr >> 2;
235 dmae.dst_addr_hi = 0;
237 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239 dmae.comp_val = DMAE_COMP_VAL;
241 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
243 "dst_addr [%x:%08x (%08x)]\n"
244 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
245 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
252 mutex_lock(&bp->dmae_mutex);
256 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
260 while (*wb_comp != DMAE_COMP_VAL) {
261 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
264 BNX2X_ERR("DMAE timeout!\n");
268 /* adjust delay for emulation/FPGA */
269 if (CHIP_REV_IS_SLOW(bp))
275 mutex_unlock(&bp->dmae_mutex);
278 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
280 struct dmae_command dmae;
281 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
284 if (!bp->dmae_ready) {
285 u32 *data = bnx2x_sp(bp, wb_data[0]);
288 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
289 " using indirect\n", src_addr, len32);
290 for (i = 0; i < len32; i++)
291 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
295 memset(&dmae, 0, sizeof(struct dmae_command));
297 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
301 DMAE_CMD_ENDIANITY_B_DW_SWAP |
303 DMAE_CMD_ENDIANITY_DW_SWAP |
305 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307 dmae.src_addr_lo = src_addr >> 2;
308 dmae.src_addr_hi = 0;
309 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
312 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314 dmae.comp_val = DMAE_COMP_VAL;
316 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
318 "dst_addr [%x:%08x (%08x)]\n"
319 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
320 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
324 mutex_lock(&bp->dmae_mutex);
326 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
329 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
333 while (*wb_comp != DMAE_COMP_VAL) {
336 BNX2X_ERR("DMAE timeout!\n");
340 /* adjust delay for emulation/FPGA */
341 if (CHIP_REV_IS_SLOW(bp))
346 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
350 mutex_unlock(&bp->dmae_mutex);
353 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
359 while (len > dmae_wr_max) {
360 bnx2x_write_dmae(bp, phys_addr + offset,
361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
369 /* used only for slowpath so not inlined */
370 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
380 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
384 REG_RD_DMAE(bp, reg, wb_data, 2);
386 return HILO_U64(wb_data[0], wb_data[1]);
390 static int bnx2x_mc_assert(struct bnx2x *bp)
394 u32 row0, row1, row2, row3;
397 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398 XSTORM_ASSERT_LIST_INDEX_OFFSET);
400 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
402 /* print the asserts */
403 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
405 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i));
407 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
414 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416 " 0x%08x 0x%08x 0x%08x\n",
417 i, row3, row2, row1, row0);
425 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426 TSTORM_ASSERT_LIST_INDEX_OFFSET);
428 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
430 /* print the asserts */
431 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
433 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i));
435 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
442 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444 " 0x%08x 0x%08x 0x%08x\n",
445 i, row3, row2, row1, row0);
453 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454 CSTORM_ASSERT_LIST_INDEX_OFFSET);
456 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
458 /* print the asserts */
459 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
461 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i));
463 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
470 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472 " 0x%08x 0x%08x 0x%08x\n",
473 i, row3, row2, row1, row0);
481 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482 USTORM_ASSERT_LIST_INDEX_OFFSET);
484 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
486 /* print the asserts */
487 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
489 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i));
491 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 4);
493 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 8);
495 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496 USTORM_ASSERT_LIST_OFFSET(i) + 12);
498 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500 " 0x%08x 0x%08x 0x%08x\n",
501 i, row3, row2, row1, row0);
511 static void bnx2x_fw_dump(struct bnx2x *bp)
519 BNX2X_ERR("NO MCP - can not dump\n");
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526 pr_err("begin fw dump (mark 0x%x)\n", mark);
529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536 for (word = 0; word < 8; word++)
537 data[word] = htonl(REG_RD(bp, offset + 4*word));
539 pr_cont("%s", (char *)data);
541 pr_err("end of fw dump\n");
544 static void bnx2x_panic_dump(struct bnx2x *bp)
549 bp->stats_state = STATS_STATE_DISABLED;
550 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
552 BNX2X_ERR("begin crash dump -----------------\n");
556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
558 " spq_prod_idx(0x%x)\n",
559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
563 for_each_queue(bp, i) {
564 struct bnx2x_fastpath *fp = &bp->fp[i];
566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
569 i, fp->rx_bd_prod, fp->rx_bd_cons,
570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574 fp->rx_sge_prod, fp->last_max_sge,
575 le16_to_cpu(fp->fp_u_idx),
576 fp->status_blk->u_status_block.status_block_index);
580 for_each_queue(bp, i) {
581 struct bnx2x_fastpath *fp = &bp->fp[i];
583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590 fp->status_blk->c_status_block.status_block_index,
591 fp->tx_db.data.prod);
596 for_each_queue(bp, i) {
597 struct bnx2x_fastpath *fp = &bp->fp[i];
599 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601 for (j = start; j != end; j = RX_BD(j + 1)) {
602 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
605 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
606 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
609 start = RX_SGE(fp->rx_sge_prod);
610 end = RX_SGE(fp->last_max_sge);
611 for (j = start; j != end; j = RX_SGE(j + 1)) {
612 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
615 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
616 i, j, rx_sge[1], rx_sge[0], sw_page->page);
619 start = RCQ_BD(fp->rx_comp_cons - 10);
620 end = RCQ_BD(fp->rx_comp_cons + 503);
621 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
624 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
630 for_each_queue(bp, i) {
631 struct bnx2x_fastpath *fp = &bp->fp[i];
633 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635 for (j = start; j != end; j = TX_BD(j + 1)) {
636 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
638 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639 i, j, sw_bd->skb, sw_bd->first_bd);
642 start = TX_BD(fp->tx_bd_cons - 10);
643 end = TX_BD(fp->tx_bd_cons + 254);
644 for (j = start; j != end; j = TX_BD(j + 1)) {
645 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
647 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
654 BNX2X_ERR("end crash dump -----------------\n");
657 static void bnx2x_int_enable(struct bnx2x *bp)
659 int port = BP_PORT(bp);
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661 u32 val = REG_RD(bp, addr);
662 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
666 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0);
668 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
671 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
676 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678 HC_CONFIG_0_REG_INT_LINE_EN_0 |
679 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
684 REG_WR(bp, addr, val);
686 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
689 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
690 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
692 REG_WR(bp, addr, val);
694 * Ensure that HC_CONFIG is written before leading/trailing edge config
699 if (CHIP_IS_E1H(bp)) {
700 /* init leading/trailing edge */
702 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
704 /* enable nig and gpio3 attention */
709 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
713 /* Make sure that interrupts are indeed enabled from here on */
717 static void bnx2x_int_disable(struct bnx2x *bp)
719 int port = BP_PORT(bp);
720 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721 u32 val = REG_RD(bp, addr);
723 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725 HC_CONFIG_0_REG_INT_LINE_EN_0 |
726 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
728 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
731 /* flush all outstanding writes */
734 REG_WR(bp, addr, val);
735 if (REG_RD(bp, addr) != val)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n");
739 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
744 /* disable interrupt handling */
745 atomic_inc(&bp->intr_sem);
746 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
749 /* prevent the HW from sending interrupts */
750 bnx2x_int_disable(bp);
752 /* make sure all ISRs are done */
754 synchronize_irq(bp->msix_table[0].vector);
759 for_each_queue(bp, i)
760 synchronize_irq(bp->msix_table[i + offset].vector);
762 synchronize_irq(bp->pdev->irq);
764 /* make sure sp_task is not running */
765 cancel_delayed_work(&bp->sp_task);
766 flush_workqueue(bnx2x_wq);
772 * General service functions
775 /* Return true if succeeded to acquire the lock */
776 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
809 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810 u8 storm, u16 index, u8 op, u8 update)
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
814 struct igu_ack_register igu_ack;
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
827 /* Make sure that ACK is written */
832 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
834 struct host_status_block *fpsb = fp->status_blk;
836 barrier(); /* status block is written to by the chip */
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
841 static u16 bnx2x_ack_int(struct bnx2x *bp)
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
855 * fast path service functions
858 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
860 /* Tell compiler that consumer and producer can change */
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
865 /* free skb in the packet ring at pos idx
866 * return idx of last bd freed
868 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
874 struct sk_buff *skb = tx_buf->skb;
875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891 #ifdef BNX2X_STOP_ON_ERROR
892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893 BNX2X_ERR("BAD nbd!\n");
897 new_cons = nbd + tx_buf->first_bd;
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
902 /* Skip a parse bd... */
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
926 tx_buf->first_bd = 0;
932 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
945 #ifdef BNX2X_STOP_ON_ERROR
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
951 return (s16)(fp->bp->tx_ring_size) - used;
954 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
958 /* Tell compiler that status block fields can change */
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
964 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
966 struct bnx2x *bp = fp->bp;
967 struct netdev_queue *txq;
968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
970 #ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
975 txq = netdev_get_tx_queue(bp->dev, fp->index);
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
979 while (sw_cons != hw_cons) {
982 pkt_cons = TX_BD(sw_cons);
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
987 hw_cons, sw_cons, pkt_cons);
989 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1009 /* TBD need a thresh? */
1010 if (unlikely(netif_tx_queue_stopped(txq))) {
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012 * while it's empty. This could have happen if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1021 __netif_tx_lock(txq, smp_processor_id());
1023 if ((netif_tx_queue_stopped(txq)) &&
1024 (bp->state == BNX2X_STATE_OPEN) &&
1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026 netif_tx_wake_queue(txq);
1028 __netif_tx_unlock(txq);
1034 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1037 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe)
1040 struct bnx2x *bp = fp->bp;
1041 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1045 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1046 fp->index, cid, command, bp->state,
1047 rr_cqe->ramrod_cqe.ramrod_type);
1052 switch (command | fp->state) {
1053 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054 BNX2X_FP_STATE_OPENING):
1055 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1057 fp->state = BNX2X_FP_STATE_OPEN;
1060 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1063 fp->state = BNX2X_FP_STATE_HALTED;
1067 BNX2X_ERR("unexpected MC reply (%d) "
1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
1076 switch (command | bp->state) {
1077 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079 bp->state = BNX2X_STATE_OPEN;
1082 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085 fp->state = BNX2X_FP_STATE_HALTED;
1088 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1094 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096 bnx2x_cnic_cfc_comp(bp, cid);
1100 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103 bp->set_mac_pending--;
1107 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109 bp->set_mac_pending--;
1114 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1115 command, bp->state);
1118 mb(); /* force bnx2x_wait_ramrod() to see the change */
1121 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1128 /* Skip "next page" elements */
1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1136 sw_buf->page = NULL;
1141 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1150 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1158 if (unlikely(page == NULL))
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1168 sw_buf->page = page;
1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1177 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1205 /* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1210 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
1226 *prod_bd = *cons_bd;
1229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1232 u16 last_max = fp->last_max_sge;
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1238 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1255 struct bnx2x *bp = fp->bp;
1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
1259 u16 last_max, last_elem, first_elem;
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1305 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1311 /* Clear the two last indices in the page to 1:
1312 these are the indices that correspond to the "next" element,
1313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1318 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
1348 #ifdef _ASM_GENERIC_INT_L64_H
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1353 fp->tpa_queue_used);
1357 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1362 struct sw_rx_page *rx_pg, old_rx_pg;
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1371 /* This is needed in order to enable forwarding support */
1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374 max(frag_size, (u32)len_on_bd));
1376 #ifdef BNX2X_STOP_ON_ERROR
1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394 rx_pg = &fp->rx_page_ring[sge_idx];
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1405 /* Unmap the page as we r going to pass it to the stack */
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1417 frag_size -= frag_len;
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1438 if (likely(new_skb)) {
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1450 prefetch(((char *)(skb)) + 128);
1452 #ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1462 skb_reserve(skb, pad);
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 iph = (struct iphdr *)skb->data;
1473 /* If there is no Rx VLAN offloading -
1474 take VLAN tag into an account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1492 napi_gro_receive(&fp->napi, skb);
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1504 /* else drop the packet and keep the buffer in the bin */
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
1507 fp->eth_q_stats.rx_skb_alloc_failed++;
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1518 struct ustorm_eth_rx_producers rx_prods = {0};
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539 ((u32 *)&rx_prods)[i]);
1541 mmiowb(); /* keep prod updates ordered */
1543 DP(NETIF_MSG_RX_STATUS,
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1550 struct bnx2x *bp = fp->bp;
1551 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1555 #ifdef BNX2X_STOP_ON_ERROR
1556 if (unlikely(bp->panic))
1560 /* CQ "next element" is of the size of the regular element,
1561 that's why it's ok here */
1562 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1566 bd_cons = fp->rx_bd_cons;
1567 bd_prod = fp->rx_bd_prod;
1568 bd_prod_fw = bd_prod;
1569 sw_comp_cons = fp->rx_comp_cons;
1570 sw_comp_prod = fp->rx_comp_prod;
1572 /* Memory barrier necessary as speculative reads of the rx
1573 * buffer can be ahead of the index in the status block
1577 DP(NETIF_MSG_RX_STATUS,
1578 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1579 fp->index, hw_comp_cons, sw_comp_cons);
1581 while (sw_comp_cons != hw_comp_cons) {
1582 struct sw_rx_bd *rx_buf = NULL;
1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe;
1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589 bd_prod = RX_BD(bd_prod);
1590 bd_cons = RX_BD(bd_cons);
1592 /* Prefetch the page containing the BD descriptor
1593 at producer's index. It will be needed when new skb is
1595 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596 (&fp->rx_desc_ring[bd_prod])) -
1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1602 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1603 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1604 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1605 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1606 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1607 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609 /* is this a slowpath msg? */
1610 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1611 bnx2x_sp_event(fp, cqe);
1614 /* this is an rx packet */
1616 rx_buf = &fp->rx_buf_ring[bd_cons];
1619 prefetch((u8 *)skb + 256);
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset;
1623 /* If CQE is marked both TPA_START and TPA_END
1624 it is a non-TPA CQE */
1625 if ((!fp->disable_tpa) &&
1626 (TPA_TYPE(cqe_fp_flags) !=
1627 (TPA_TYPE_START | TPA_TYPE_END))) {
1628 u16 queue = cqe->fast_path_cqe.queue_index;
1630 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631 DP(NETIF_MSG_RX_STATUS,
1632 "calling tpa_start on queue %d\n",
1635 bnx2x_tpa_start(fp, queue, skb,
1640 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641 DP(NETIF_MSG_RX_STATUS,
1642 "calling tpa_stop on queue %d\n",
1645 if (!BNX2X_RX_SUM_FIX(cqe))
1646 BNX2X_ERR("STOP on none TCP "
1649 /* This is a size of the linear data
1651 len = le16_to_cpu(cqe->fast_path_cqe.
1653 bnx2x_tpa_stop(bp, fp, queue, pad,
1654 len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1660 bnx2x_update_sge_prod(fp,
1661 &cqe->fast_path_cqe);
1666 dma_sync_single_for_device(&bp->pdev->dev,
1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH,
1671 prefetch(((char *)(skb)) + 128);
1673 /* is this an error packet? */
1674 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1675 DP(NETIF_MSG_RX_ERR,
1676 "ERROR flags %x rx packet %u\n",
1677 cqe_fp_flags, sw_comp_cons);
1678 fp->eth_q_stats.rx_err_discard_pkt++;
1682 /* Since we don't have a jumbo ring
1683 * copy small packets if mtu > 1500
1685 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1686 (len <= RX_COPY_THRESH)) {
1687 struct sk_buff *new_skb;
1689 new_skb = netdev_alloc_skb(bp->dev,
1691 if (new_skb == NULL) {
1692 DP(NETIF_MSG_RX_ERR,
1693 "ERROR packet dropped "
1694 "because of alloc failure\n");
1695 fp->eth_q_stats.rx_skb_alloc_failed++;
1700 skb_copy_from_linear_data_offset(skb, pad,
1701 new_skb->data + pad, len);
1702 skb_reserve(new_skb, pad);
1703 skb_put(new_skb, len);
1705 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1710 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1711 dma_unmap_single(&bp->pdev->dev,
1712 dma_unmap_addr(rx_buf, mapping),
1715 skb_reserve(skb, pad);
1719 DP(NETIF_MSG_RX_ERR,
1720 "ERROR packet dropped because "
1721 "of alloc failure\n");
1722 fp->eth_q_stats.rx_skb_alloc_failed++;
1724 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1728 skb->protocol = eth_type_trans(skb, bp->dev);
1730 skb->ip_summed = CHECKSUM_NONE;
1732 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1733 skb->ip_summed = CHECKSUM_UNNECESSARY;
1735 fp->eth_q_stats.hw_csum_err++;
1739 skb_record_rx_queue(skb, fp->index);
1742 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1743 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1744 PARSING_FLAGS_VLAN))
1745 vlan_gro_receive(&fp->napi, bp->vlgrp,
1746 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1749 napi_gro_receive(&fp->napi, skb);
1755 bd_cons = NEXT_RX_IDX(bd_cons);
1756 bd_prod = NEXT_RX_IDX(bd_prod);
1757 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1760 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1761 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1763 if (rx_pkt == budget)
1767 fp->rx_bd_cons = bd_cons;
1768 fp->rx_bd_prod = bd_prod_fw;
1769 fp->rx_comp_cons = sw_comp_cons;
1770 fp->rx_comp_prod = sw_comp_prod;
1772 /* Update producers */
1773 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1776 fp->rx_pkt += rx_pkt;
1782 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1784 struct bnx2x_fastpath *fp = fp_cookie;
1785 struct bnx2x *bp = fp->bp;
1787 /* Return here if interrupt is disabled */
1788 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1793 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1794 fp->index, fp->sb_id);
1795 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1797 #ifdef BNX2X_STOP_ON_ERROR
1798 if (unlikely(bp->panic))
1802 /* Handle Rx and Tx according to MSI-X vector */
1803 prefetch(fp->rx_cons_sb);
1804 prefetch(fp->tx_cons_sb);
1805 prefetch(&fp->status_blk->u_status_block.status_block_index);
1806 prefetch(&fp->status_blk->c_status_block.status_block_index);
1807 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1812 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1814 struct bnx2x *bp = netdev_priv(dev_instance);
1815 u16 status = bnx2x_ack_int(bp);
1819 /* Return here if interrupt is shared and it's not for us */
1820 if (unlikely(status == 0)) {
1821 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1824 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1826 /* Return here if interrupt is disabled */
1827 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1832 #ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1837 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838 struct bnx2x_fastpath *fp = &bp->fp[i];
1840 mask = 0x2 << fp->sb_id;
1841 if (status & mask) {
1842 /* Handle Rx and Tx according to SB id */
1843 prefetch(fp->rx_cons_sb);
1844 prefetch(&fp->status_blk->u_status_block.
1845 status_block_index);
1846 prefetch(fp->tx_cons_sb);
1847 prefetch(&fp->status_blk->c_status_block.
1848 status_block_index);
1849 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1855 mask = 0x2 << CNIC_SB_ID(bp);
1856 if (status & (mask | 0x1)) {
1857 struct cnic_ops *c_ops = NULL;
1860 c_ops = rcu_dereference(bp->cnic_ops);
1862 c_ops->cnic_handler(bp->cnic_data, NULL);
1869 if (unlikely(status & 0x1)) {
1870 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1877 if (unlikely(status))
1878 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1884 /* end of fast path */
1886 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1891 * General service functions
1894 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1897 u32 resource_bit = (1 << resource);
1898 int func = BP_FUNC(bp);
1899 u32 hw_lock_control_reg;
1902 /* Validating that the resource is within range */
1903 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1905 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1911 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1913 hw_lock_control_reg =
1914 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1917 /* Validating that the resource is not already taken */
1918 lock_status = REG_RD(bp, hw_lock_control_reg);
1919 if (lock_status & resource_bit) {
1920 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1921 lock_status, resource_bit);
1925 /* Try for 5 second every 5ms */
1926 for (cnt = 0; cnt < 1000; cnt++) {
1927 /* Try to acquire the lock */
1928 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1929 lock_status = REG_RD(bp, hw_lock_control_reg);
1930 if (lock_status & resource_bit)
1935 DP(NETIF_MSG_HW, "Timeout\n");
1939 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1942 u32 resource_bit = (1 << resource);
1943 int func = BP_FUNC(bp);
1944 u32 hw_lock_control_reg;
1946 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1948 /* Validating that the resource is within range */
1949 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1951 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1957 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1959 hw_lock_control_reg =
1960 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1963 /* Validating that the resource is currently taken */
1964 lock_status = REG_RD(bp, hw_lock_control_reg);
1965 if (!(lock_status & resource_bit)) {
1966 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1967 lock_status, resource_bit);
1971 REG_WR(bp, hw_lock_control_reg, resource_bit);
1975 /* HW Lock for shared dual port PHYs */
1976 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1978 mutex_lock(&bp->port.phy_mutex);
1980 if (bp->port.need_hw_lock)
1981 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1984 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1986 if (bp->port.need_hw_lock)
1987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1989 mutex_unlock(&bp->port.phy_mutex);
1992 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1994 /* The GPIO should be swapped if swap register is set and active */
1995 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997 int gpio_shift = gpio_num +
1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999 u32 gpio_mask = (1 << gpio_shift);
2003 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2008 /* read GPIO value */
2009 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2011 /* get the requested pin value */
2012 if ((gpio_reg & gpio_mask) == gpio_mask)
2017 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2022 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2024 /* The GPIO should be swapped if swap register is set and active */
2025 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2026 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2027 int gpio_shift = gpio_num +
2028 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029 u32 gpio_mask = (1 << gpio_shift);
2032 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2037 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2038 /* read GPIO and mask except the float bits */
2039 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2042 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044 gpio_num, gpio_shift);
2045 /* clear FLOAT and set CLR */
2046 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2050 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052 gpio_num, gpio_shift);
2053 /* clear FLOAT and set SET */
2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2058 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2059 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060 gpio_num, gpio_shift);
2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2069 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2075 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2077 /* The GPIO should be swapped if swap register is set and active */
2078 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080 int gpio_shift = gpio_num +
2081 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082 u32 gpio_mask = (1 << gpio_shift);
2085 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2090 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2092 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2095 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097 "output low\n", gpio_num, gpio_shift);
2098 /* clear SET and set CLR */
2099 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2103 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105 "output high\n", gpio_num, gpio_shift);
2106 /* clear CLR and set SET */
2107 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2115 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2121 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2123 u32 spio_mask = (1 << spio_num);
2126 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127 (spio_num > MISC_REGISTERS_SPIO_7)) {
2128 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2132 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133 /* read SPIO and mask except the float bits */
2134 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2137 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2138 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139 /* clear FLOAT and set CLR */
2140 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2144 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146 /* clear FLOAT and set SET */
2147 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2151 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2154 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2161 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2167 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2169 switch (bp->link_vars.ieee_fc &
2170 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2171 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2172 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2177 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2182 bp->port.advertising |= ADVERTISED_Asym_Pause;
2186 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192 static void bnx2x_link_report(struct bnx2x *bp)
2194 if (bp->flags & MF_FUNC_DIS) {
2195 netif_carrier_off(bp->dev);
2196 netdev_err(bp->dev, "NIC Link is Down\n");
2200 if (bp->link_vars.link_up) {
2203 if (bp->state == BNX2X_STATE_OPEN)
2204 netif_carrier_on(bp->dev);
2205 netdev_info(bp->dev, "NIC Link is Up, ");
2207 line_speed = bp->link_vars.line_speed;
2212 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214 if (vn_max_rate < line_speed)
2215 line_speed = vn_max_rate;
2217 pr_cont("%d Mbps ", line_speed);
2219 if (bp->link_vars.duplex == DUPLEX_FULL)
2220 pr_cont("full duplex");
2222 pr_cont("half duplex");
2224 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2226 pr_cont(", receive ");
2227 if (bp->link_vars.flow_ctrl &
2229 pr_cont("& transmit ");
2231 pr_cont(", transmit ");
2233 pr_cont("flow control ON");
2237 } else { /* link_down */
2238 netif_carrier_off(bp->dev);
2239 netdev_err(bp->dev, "NIC Link is Down\n");
2243 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2245 if (!BP_NOMCP(bp)) {
2248 /* Initialize link parameters structure variables */
2249 /* It is recommended to turn off RX FC for jumbo frames
2250 for better performance */
2251 if (bp->dev->mtu > 5000)
2252 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2254 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2256 bnx2x_acquire_phy_lock(bp);
2258 if (load_mode == LOAD_DIAG)
2259 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2261 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2263 bnx2x_release_phy_lock(bp);
2265 bnx2x_calc_fc_adv(bp);
2267 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2269 bnx2x_link_report(bp);
2274 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2278 static void bnx2x_link_set(struct bnx2x *bp)
2280 if (!BP_NOMCP(bp)) {
2281 bnx2x_acquire_phy_lock(bp);
2282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2283 bnx2x_release_phy_lock(bp);
2285 bnx2x_calc_fc_adv(bp);
2287 BNX2X_ERR("Bootcode is missing - can not set link\n");
2290 static void bnx2x__link_reset(struct bnx2x *bp)
2292 if (!BP_NOMCP(bp)) {
2293 bnx2x_acquire_phy_lock(bp);
2294 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2295 bnx2x_release_phy_lock(bp);
2297 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2300 static u8 bnx2x_link_test(struct bnx2x *bp)
2304 if (!BP_NOMCP(bp)) {
2305 bnx2x_acquire_phy_lock(bp);
2306 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307 bnx2x_release_phy_lock(bp);
2309 BNX2X_ERR("Bootcode is missing - can not test link\n");
2314 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2316 u32 r_param = bp->link_vars.line_speed / 8;
2317 u32 fair_periodic_timeout_usec;
2320 memset(&(bp->cmng.rs_vars), 0,
2321 sizeof(struct rate_shaping_vars_per_port));
2322 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2324 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2327 /* this is the threshold below which no timer arming will occur.
2328 The 1.25 coefficient makes the threshold a little bigger than
2329 the real time, to compensate for timer inaccuracy */
2330 bp->cmng.rs_vars.rs_threshold =
2331 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
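/* Worked example, using only the figures stated in the comments
 * here: at 10G, r_param = 10000 / 8 = 1250 bytes/usec, so with a
 * 100 usec period the threshold is 100 * 1250 * 5/4 = 156250 bytes,
 * i.e. the 1.25 coefficient adds a 25% margin over one period. */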
2333 /* resolution of fairness timer */
2334 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2338 /* this is the threshold below which we won't arm the timer anymore */
2339 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2341 /* we multiply by 1e3/8 to get bytes/msec.
2342 We don't want the credit to exceed
2343 t_fair * FAIR_MEM (the algorithm resolution) */
2344 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345 /* since each tick is 4 usec */
2346 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2349 /* Calculates the sum of vn_min_rates.
2350 It's needed for further normalizing of the min_rates.
2351 Returns:
2352 sum of vn_min_rates.
2353 or
2354 0 - if all the min_rates are 0.
2355 In the latter case the fairness algorithm should be deactivated.
2356 If not all min_rates are zero then those that are zeroes will be set to 1. */
2358 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2361 int port = BP_PORT(bp);
2364 bp->vn_weight_sum = 0;
2365 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366 int func = 2*vn + port;
2367 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2375 /* If min rate is zero - set it to 1 */
2376 if (!vn_min_rate)
2377 vn_min_rate = DEF_MIN_RATE;
2381 bp->vn_weight_sum += vn_min_rate;
2384 /* ... only if all min rates are zeros - disable fairness */
2386 bp->cmng.flags.cmng_enables &=
2387 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2389 " fairness will be disabled\n");
2391 bp->cmng.flags.cmng_enables |=
2392 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2395 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2397 struct rate_shaping_vars_per_vn m_rs_vn;
2398 struct fairness_vars_per_vn m_fair_vn;
2399 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400 u16 vn_min_rate, vn_max_rate;
2403 /* If function is hidden - set min and max to zeroes */
2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2409 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2411 /* If min rate is zero - set it to 1 */
2412 if (!vn_min_rate)
2413 vn_min_rate = DEF_MIN_RATE;
2414 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2418 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2419 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2421 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2424 /* global vn counter - maximal Mbps for this vn */
2425 m_rs_vn.vn_counter.rate = vn_max_rate;
2427 /* quota - number of bytes transmitted in this period */
2428 m_rs_vn.vn_counter.quota =
2429 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
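/* Quota arithmetic: vn_max_rate is in Mbps, i.e. bits/usec, so
 * rate * period(usec) / 8 gives bytes per period. Illustration:
 * a 10000 Mbps vn with a 100 usec period may send
 * 10000 * 100 / 8 = 125000 bytes before being rate-limited. */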
2431 if (bp->vn_weight_sum) {
2432 /* credit for each period of the fairness algorithm:
2433 number of bytes in T_FAIR (the vns share the port rate).
2434 vn_weight_sum should not be larger than 10000, thus
2435 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2436 than zero */
2437 m_fair_vn.vn_credit_delta =
2438 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439 (8 * bp->vn_weight_sum))),
2440 (bp->cmng.fair_vars.fair_threshold * 2));
2441 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2442 m_fair_vn.vn_credit_delta);
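/* Note on the max_t() above: the fair_threshold * 2 floor keeps a
 * vn with a tiny min rate from getting a credit smaller than the
 * no-arming threshold set up in bnx2x_init_port_minmax(), so the
 * fairness timer is always worth arming for it. */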
2445 /* Store it to internal memory */
2446 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449 ((u32 *)(&m_rs_vn))[i]);
2451 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_fair_vn))[i]);
2458 /* This function is called upon link interrupt */
2459 static void bnx2x_link_attn(struct bnx2x *bp)
2461 /* Make sure that we are synced with the current statistics */
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2464 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2466 if (bp->link_vars.link_up) {
2468 /* dropless flow control */
2469 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2470 int port = BP_PORT(bp);
2471 u32 pause_enabled = 0;
2473 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2474 pause_enabled = 1;
2476 REG_WR(bp, BAR_USTRORM_INTMEM +
2477 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2481 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2482 struct host_port_stats *pstats;
2484 pstats = bnx2x_sp(bp, port_stats);
2485 /* reset old bmac stats */
2486 memset(&(pstats->mac_stx[0]), 0,
2487 sizeof(struct mac_stx));
2489 if (bp->state == BNX2X_STATE_OPEN)
2490 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2493 /* indicate link status */
2494 bnx2x_link_report(bp);
2497 int port = BP_PORT(bp);
2501 /* Set the attention towards other drivers on the same port */
2502 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2503 if (vn == BP_E1HVN(bp))
2506 func = ((vn << 1) | port);
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2508 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2511 if (bp->link_vars.link_up) {
2514 /* Init rate shaping and fairness contexts */
2515 bnx2x_init_port_minmax(bp);
2517 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2518 bnx2x_init_vn_minmax(bp, 2*vn + port);
2520 /* Store it to internal memory */
2522 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2523 REG_WR(bp, BAR_XSTRORM_INTMEM +
2524 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2525 ((u32 *)(&bp->cmng))[i]);
2530 static void bnx2x__link_status_update(struct bnx2x *bp)
2532 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2535 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2537 if (bp->link_vars.link_up)
2538 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2540 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2542 bnx2x_calc_vn_weight_sum(bp);
2544 /* indicate link status */
2545 bnx2x_link_report(bp);
2548 static void bnx2x_pmf_update(struct bnx2x *bp)
2550 int port = BP_PORT(bp);
2554 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2556 /* enable nig attention */
2557 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2558 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2559 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2561 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2569 * General service functions
2572 /* send the MCP a request, block until there is a reply */
2573 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2575 int func = BP_FUNC(bp);
2576 u32 seq = ++bp->fw_seq;
2579 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2581 mutex_lock(&bp->fw_mb_mutex);
2582 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2583 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2586 /* let the FW do its magic ... */
2587 msleep(delay);
2589 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2591 /* Give the FW up to 5 seconds (500*10ms) */
2592 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2594 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2595 cnt*delay, rc, seq);
2597 /* is this a reply to our command? */
2598 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2599 rc &= FW_MSG_CODE_MASK;
2602 BNX2X_ERR("FW failed to respond!\n");
2606 mutex_unlock(&bp->fw_mb_mutex);
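/* Sketch of the mailbox handshake implemented above (a summary,
 * not an additional spec): the driver writes (command | seq) to
 * drv_mb_header and polls fw_mb_header until the firmware echoes
 * the same sequence number; only a seq-matched reply is masked
 * with FW_MSG_CODE_MASK and returned, so a stale answer to an
 * earlier command is never mistaken for this one. */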
2611 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2612 static void bnx2x_set_rx_mode(struct net_device *dev);
2614 static void bnx2x_e1h_disable(struct bnx2x *bp)
2616 int port = BP_PORT(bp);
2618 netif_tx_disable(bp->dev);
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2622 netif_carrier_off(bp->dev);
2625 static void bnx2x_e1h_enable(struct bnx2x *bp)
2627 int port = BP_PORT(bp);
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2631 /* Tx queues should only be re-enabled */
2632 netif_tx_wake_all_queues(bp->dev);
2635 * Should not call netif_carrier_on since it will be called if the link
2636 * is up when checking for link state
2640 static void bnx2x_update_min_max(struct bnx2x *bp)
2642 int port = BP_PORT(bp);
2645 /* Init rate shaping and fairness contexts */
2646 bnx2x_init_port_minmax(bp);
2648 bnx2x_calc_vn_weight_sum(bp);
2650 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2651 bnx2x_init_vn_minmax(bp, 2*vn + port);
2656 /* Set the attention towards other drivers on the same port */
2657 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2658 if (vn == BP_E1HVN(bp))
2661 func = ((vn << 1) | port);
2662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2663 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2666 /* Store it to internal memory */
2667 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2668 REG_WR(bp, BAR_XSTRORM_INTMEM +
2669 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2670 ((u32 *)(&bp->cmng))[i]);
2674 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2676 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2678 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2681 * This is the only place besides the function initialization
2682 * where the bp->flags can change so it is done without any
2683 * locks */
2685 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2686 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2687 bp->flags |= MF_FUNC_DIS;
2689 bnx2x_e1h_disable(bp);
2691 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2692 bp->flags &= ~MF_FUNC_DIS;
2694 bnx2x_e1h_enable(bp);
2696 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2698 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2700 bnx2x_update_min_max(bp);
2701 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2704 /* Report results to MCP */
2705 if (dcc_event)
2706 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2707 else
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2711 /* must be called under the spq lock */
2712 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2714 struct eth_spe *next_spe = bp->spq_prod_bd;
2716 if (bp->spq_prod_bd == bp->spq_last_bd) {
2717 bp->spq_prod_bd = bp->spq;
2718 bp->spq_prod_idx = 0;
2719 DP(NETIF_MSG_TIMER, "end of spq\n");
2727 /* must be called under the spq lock */
2728 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2730 int func = BP_FUNC(bp);
2732 /* Make sure that BD data is updated before writing the producer */
2735 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2740 /* the slow path queue is odd since completions arrive on the fastpath ring */
2741 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2742 u32 data_hi, u32 data_lo, int common)
2744 struct eth_spe *spe;
2746 #ifdef BNX2X_STOP_ON_ERROR
2747 if (unlikely(bp->panic))
2751 spin_lock_bh(&bp->spq_lock);
2753 if (!bp->spq_left) {
2754 BNX2X_ERR("BUG! SPQ ring full!\n");
2755 spin_unlock_bh(&bp->spq_lock);
2760 spe = bnx2x_sp_get_next(bp);
2762 /* CID needs port number to be encoded in it */
2763 spe->hdr.conn_and_cmd_data =
2764 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2766 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2767 if (common)
2768 spe->hdr.type |=
2769 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2771 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2772 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2776 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2778 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2779 (u32)(U64_LO(bp->spq_mapping) +
2780 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2781 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2783 bnx2x_sp_prod_update(bp);
2784 spin_unlock_bh(&bp->spq_lock);
2788 /* acquire split MCP access lock register */
2789 static int bnx2x_acquire_alr(struct bnx2x *bp)
2795 for (j = 0; j < 1000; j++) {
2797 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799 if (val & (1L << 31))
2804 if (!(val & (1L << 31))) {
2805 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2812 /* release split MCP access lock register */
2813 static void bnx2x_release_alr(struct bnx2x *bp)
2815 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2818 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2820 struct host_def_status_block *def_sb = bp->def_status_blk;
2823 barrier(); /* status block is written to by the chip */
2824 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2825 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2828 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2829 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2832 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2833 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2836 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2837 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2840 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2841 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2848 * slow path service functions
2851 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2853 int port = BP_PORT(bp);
2854 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2855 COMMAND_REG_ATTN_BITS_SET);
2856 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2857 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2858 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2859 NIG_REG_MASK_INTERRUPT_PORT0;
2863 if (bp->attn_state & asserted)
2864 BNX2X_ERR("IGU ERROR\n");
2866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867 aeu_mask = REG_RD(bp, aeu_addr);
2869 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2870 aeu_mask, asserted);
2871 aeu_mask &= ~(asserted & 0x3ff);
2872 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2874 REG_WR(bp, aeu_addr, aeu_mask);
2875 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2877 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2878 bp->attn_state |= asserted;
2879 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2881 if (asserted & ATTN_HARD_WIRED_MASK) {
2882 if (asserted & ATTN_NIG_FOR_FUNC) {
2884 bnx2x_acquire_phy_lock(bp);
2886 /* save nig interrupt mask */
2887 nig_mask = REG_RD(bp, nig_int_mask_addr);
2888 REG_WR(bp, nig_int_mask_addr, 0);
2890 bnx2x_link_attn(bp);
2892 /* handle unicore attn? */
2894 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2897 if (asserted & GPIO_2_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2900 if (asserted & GPIO_3_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2903 if (asserted & GPIO_4_FUNC)
2904 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2907 if (asserted & ATTN_GENERAL_ATTN_1) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2911 if (asserted & ATTN_GENERAL_ATTN_2) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2915 if (asserted & ATTN_GENERAL_ATTN_3) {
2916 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2920 if (asserted & ATTN_GENERAL_ATTN_4) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2924 if (asserted & ATTN_GENERAL_ATTN_5) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2928 if (asserted & ATTN_GENERAL_ATTN_6) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2934 } /* if hardwired */
2936 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2938 REG_WR(bp, hc_addr, asserted);
2940 /* now set back the mask */
2941 if (asserted & ATTN_NIG_FOR_FUNC) {
2942 REG_WR(bp, nig_int_mask_addr, nig_mask);
2943 bnx2x_release_phy_lock(bp);
2947 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2949 int port = BP_PORT(bp);
2951 /* mark the failure */
2952 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2954 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2955 bp->link_params.ext_phy_config);
2957 /* log the failure */
2958 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959 " the driver to shutdown the card to prevent permanent"
2960 " damage. Please contact OEM Support for assistance\n");
2963 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2965 int port = BP_PORT(bp);
2967 u32 val, swap_val, swap_override;
2969 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2972 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2974 val = REG_RD(bp, reg_offset);
2975 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976 REG_WR(bp, reg_offset, val);
2978 BNX2X_ERR("SPIO5 hw attention\n");
2980 /* Fan failure attention */
2981 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2983 /* Low power mode is controlled by GPIO 2 */
2984 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2985 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2986 /* The PHY reset is controlled by GPIO 1 */
2987 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2992 /* The PHY reset is controlled by GPIO 1 */
2993 /* fake the port number to cancel the swap done in
2994 set_gpio() */
2995 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2996 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2997 port = (swap_val && swap_override) ^ 1;
2998 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2999 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3005 bnx2x_fan_failure(bp);
3008 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010 bnx2x_acquire_phy_lock(bp);
3011 bnx2x_handle_module_detect_int(&bp->link_params);
3012 bnx2x_release_phy_lock(bp);
3015 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3017 val = REG_RD(bp, reg_offset);
3018 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019 REG_WR(bp, reg_offset, val);
3021 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3022 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3027 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3031 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3033 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035 /* DORQ discard attention */
3037 BNX2X_ERR("FATAL error from DORQ\n");
3040 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3042 int port = BP_PORT(bp);
3045 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3048 val = REG_RD(bp, reg_offset);
3049 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050 REG_WR(bp, reg_offset, val);
3052 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3053 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3058 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3062 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3064 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066 /* CFC error attention */
3068 BNX2X_ERR("FATAL error from CFC\n");
3071 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3073 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075 /* RQ_USDMDP_FIFO_OVERFLOW */
3077 BNX2X_ERR("FATAL error from PXP\n");
3080 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3082 int port = BP_PORT(bp);
3085 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3088 val = REG_RD(bp, reg_offset);
3089 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090 REG_WR(bp, reg_offset, val);
3092 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3093 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3098 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3102 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3104 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105 int func = BP_FUNC(bp);
3107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3108 bp->mf_config = SHMEM_RD(bp,
3109 mf_cfg.func_mf_config[func].config);
3110 val = SHMEM_RD(bp, func_mb[func].drv_status);
3111 if (val & DRV_STATUS_DCC_EVENT_MASK)
3112 bnx2x_dcc_event(bp,
3113 (val & DRV_STATUS_DCC_EVENT_MASK));
3114 bnx2x__link_status_update(bp);
3115 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3116 bnx2x_pmf_update(bp);
3118 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3120 BNX2X_ERR("MC assert!\n");
3121 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3122 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3127 } else if (attn & BNX2X_MCP_ASSERT) {
3129 BNX2X_ERR("MCP assert!\n");
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3134 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3137 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3138 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3139 if (attn & BNX2X_GRC_TIMEOUT) {
3140 val = CHIP_IS_E1H(bp) ?
3141 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3142 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3144 if (attn & BNX2X_GRC_RSV) {
3145 val = CHIP_IS_E1H(bp) ?
3146 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3147 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3149 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3153 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3154 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3157 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3158 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3159 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3160 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3161 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3162 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
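/* Layout of BNX2X_MISC_GEN_REG as implied by the masks above:
 * bits [15:0] hold the load counter (one count per loaded
 * function) while bits [31:16] form the RESET_DONE area, where a
 * non-zero value - normally just bit 16 - marks a global HW reset
 * in progress. The helpers below read-modify-write the two fields
 * independently. */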
3164 * should be run under rtnl lock
3166 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3176 * should be run under rtnl lock
3178 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3182 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3188 * should be run under rtnl lock
3190 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3192 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3193 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3194 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3198 * should be run under rtnl lock
3200 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3202 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3204 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3206 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3207 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3213 * should be run under rtnl lock
3215 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3217 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3219 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3221 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3222 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230 * should be run under rtnl lock
3232 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3234 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3237 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3239 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3243 static inline void _print_next_block(int idx, const char *blk)
3250 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3254 for (i = 0; sig; i++) {
3255 cur_bit = ((u32)0x1 << i);
3256 if (sig & cur_bit) {
3258 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3259 _print_next_block(par_num++, "BRB");
3261 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3262 _print_next_block(par_num++, "PARSER");
3264 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3265 _print_next_block(par_num++, "TSDM");
3267 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3268 _print_next_block(par_num++, "SEARCHER");
3270 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3271 _print_next_block(par_num++, "TSEMI");
3283 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3287 for (i = 0; sig; i++) {
3288 cur_bit = ((u32)0x1 << i);
3289 if (sig & cur_bit) {
3291 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3292 _print_next_block(par_num++, "PBCLIENT");
3294 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3295 _print_next_block(par_num++, "QM");
3297 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3298 _print_next_block(par_num++, "XSDM");
3300 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3301 _print_next_block(par_num++, "XSEMI");
3303 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3304 _print_next_block(par_num++, "DOORBELLQ");
3306 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3307 _print_next_block(par_num++, "VAUX PCI CORE");
3309 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3310 _print_next_block(par_num++, "DEBUG");
3312 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3313 _print_next_block(par_num++, "USDM");
3315 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3316 _print_next_block(par_num++, "USEMI");
3318 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3319 _print_next_block(par_num++, "UPB");
3321 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3322 _print_next_block(par_num++, "CSDM");
3334 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3338 for (i = 0; sig; i++) {
3339 cur_bit = ((u32)0x1 << i);
3340 if (sig & cur_bit) {
3342 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSEMI");
3345 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3346 _print_next_block(par_num++, "PXP");
3348 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3349 _print_next_block(par_num++,
3350 "PXPPCICLOCKCLIENT");
3352 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3353 _print_next_block(par_num++, "CFC");
3355 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3356 _print_next_block(par_num++, "CDU");
3358 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3359 _print_next_block(par_num++, "IGU");
3361 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3362 _print_next_block(par_num++, "MISC");
3374 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3378 for (i = 0; sig; i++) {
3379 cur_bit = ((u32)0x1 << i);
3380 if (sig & cur_bit) {
3382 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3383 _print_next_block(par_num++, "MCP ROM");
3385 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3386 _print_next_block(par_num++, "MCP UMP RX");
3388 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3389 _print_next_block(par_num++, "MCP UMP TX");
3391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3392 _print_next_block(par_num++, "MCP SCPAD");
3404 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3407 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3408 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3410 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3411 "[0]:0x%08x [1]:0x%08x "
3412 "[2]:0x%08x [3]:0x%08x\n",
3413 sig0 & HW_PRTY_ASSERT_SET_0,
3414 sig1 & HW_PRTY_ASSERT_SET_1,
3415 sig2 & HW_PRTY_ASSERT_SET_2,
3416 sig3 & HW_PRTY_ASSERT_SET_3);
3417 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3419 par_num = bnx2x_print_blocks_with_parity0(
3420 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3421 par_num = bnx2x_print_blocks_with_parity1(
3422 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3423 par_num = bnx2x_print_blocks_with_parity2(
3424 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3425 par_num = bnx2x_print_blocks_with_parity3(
3426 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3433 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3435 struct attn_route attn;
3436 int port = BP_PORT(bp);
3438 attn.sig[0] = REG_RD(bp,
3439 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3441 attn.sig[1] = REG_RD(bp,
3442 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3444 attn.sig[2] = REG_RD(bp,
3445 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3447 attn.sig[3] = REG_RD(bp,
3448 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3451 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3455 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3457 struct attn_route attn, *group_mask;
3458 int port = BP_PORT(bp);
3464 /* need to take HW lock because MCP or other port might also
3465 try to handle this event */
3466 bnx2x_acquire_alr(bp);
3468 if (bnx2x_chk_parity_attn(bp)) {
3469 bp->recovery_state = BNX2X_RECOVERY_INIT;
3470 bnx2x_set_reset_in_progress(bp);
3471 schedule_delayed_work(&bp->reset_task, 0);
3472 /* Disable HW interrupts */
3473 bnx2x_int_disable(bp);
3474 bnx2x_release_alr(bp);
3475 /* In case of parity errors don't handle attentions so that
3476 * other functions would also "see" the parity errors. */
3481 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3482 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3483 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3484 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3485 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3486 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3488 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3489 if (deasserted & (1 << index)) {
3490 group_mask = &bp->attn_group[index];
3492 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3493 index, group_mask->sig[0], group_mask->sig[1],
3494 group_mask->sig[2], group_mask->sig[3]);
3496 bnx2x_attn_int_deasserted3(bp,
3497 attn.sig[3] & group_mask->sig[3]);
3498 bnx2x_attn_int_deasserted1(bp,
3499 attn.sig[1] & group_mask->sig[1]);
3500 bnx2x_attn_int_deasserted2(bp,
3501 attn.sig[2] & group_mask->sig[2]);
3502 bnx2x_attn_int_deasserted0(bp,
3503 attn.sig[0] & group_mask->sig[0]);
3507 bnx2x_release_alr(bp);
3509 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3512 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3514 REG_WR(bp, reg_addr, val);
3516 if (~bp->attn_state & deasserted)
3517 BNX2X_ERR("IGU ERROR\n");
3519 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3520 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3522 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523 aeu_mask = REG_RD(bp, reg_addr);
3525 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3526 aeu_mask, deasserted);
3527 aeu_mask |= (deasserted & 0x3ff);
3528 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3530 REG_WR(bp, reg_addr, aeu_mask);
3531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3533 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3534 bp->attn_state &= ~deasserted;
3535 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3538 static void bnx2x_attn_int(struct bnx2x *bp)
3540 /* read local copy of bits */
3541 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3543 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3545 u32 attn_state = bp->attn_state;
3547 /* look for changed bits */
3548 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3549 u32 deasserted = ~attn_bits & attn_ack & attn_state;
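/* Worked example of the bit algebra above: a bit set in attn_bits
 * but clear in both attn_ack and attn_state is newly raised and
 * lands in 'asserted'; once it is acked and tracked in attn_state,
 * the same bit clearing in attn_bits lands in 'deasserted'. */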
3552 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3553 attn_bits, attn_ack, asserted, deasserted);
3555 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3556 BNX2X_ERR("BAD attention state\n");
3558 /* handle bits that were raised */
3560 bnx2x_attn_int_asserted(bp, asserted);
3563 bnx2x_attn_int_deasserted(bp, deasserted);
3566 static void bnx2x_sp_task(struct work_struct *work)
3568 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3571 /* Return here if interrupt is disabled */
3572 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3573 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3577 status = bnx2x_update_dsb_idx(bp);
3578 /* if (status == 0) */
3579 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3581 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3589 /* CStorm events: STAT_QUERY */
3591 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3595 if (unlikely(status))
3596 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3599 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3601 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3603 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3605 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3607 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3611 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3613 struct net_device *dev = dev_instance;
3614 struct bnx2x *bp = netdev_priv(dev);
3616 /* Return here if interrupt is disabled */
3617 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3618 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3622 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3624 #ifdef BNX2X_STOP_ON_ERROR
3625 if (unlikely(bp->panic))
3631 struct cnic_ops *c_ops;
3634 c_ops = rcu_dereference(bp->cnic_ops);
3636 c_ops->cnic_handler(bp->cnic_data, NULL);
3640 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3645 /* end of slow path */
3649 /****************************************************************************
3651 ****************************************************************************/
3653 /* sum[hi:lo] += add[hi:lo] */
3654 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3657 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3660 /* difference = minuend - subtrahend */
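/* e.g. 0x00000001:00000000 - 0x00000000:00000001: since m_lo < s_lo,
 * 1 is 'loaned' from d_hi (1 -> 0) and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * giving the expected 0x00000000:ffffffff */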
3661 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3663 if (m_lo < s_lo) { \
3665 d_hi = m_hi - s_hi; \
3667 /* we can 'loan' 1 */ \
3669 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3671 /* m_hi <= s_hi */ \
3676 /* m_lo >= s_lo */ \
3677 if (m_hi < s_hi) { \
3681 /* m_hi >= s_hi */ \
3682 d_hi = m_hi - s_hi; \
3683 d_lo = m_lo - s_lo; \
3688 #define UPDATE_STAT64(s, t) \
3690 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3691 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3692 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3693 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3694 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3695 pstats->mac_stx[1].t##_lo, diff.lo); \
3698 #define UPDATE_STAT64_NIG(s, t) \
3700 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3701 diff.lo, new->s##_lo, old->s##_lo); \
3702 ADD_64(estats->t##_hi, diff.hi, \
3703 estats->t##_lo, diff.lo); \
3706 /* sum[hi:lo] += add */
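/* e.g. 0x00000000:fffffffe += 5: s_lo wraps to 3 and, because the
 * wrapped s_lo < a, a carry goes into s_hi, giving
 * 0x00000001:00000003. The UPDATE_EXTEND_* macros below rely on
 * the same unsigned wrap-around when computing 'new - old' deltas
 * of 32-bit counters. */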
3707 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3710 s_hi += (s_lo < a) ? 1 : 0; \
3713 #define UPDATE_EXTEND_STAT(s) \
3715 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3716 pstats->mac_stx[1].s##_lo, \
3720 #define UPDATE_EXTEND_TSTAT(s, t) \
3722 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3723 old_tclient->s = tclient->s; \
3724 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3727 #define UPDATE_EXTEND_USTAT(s, t) \
3729 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3730 old_uclient->s = uclient->s; \
3731 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3734 #define UPDATE_EXTEND_XSTAT(s, t) \
3736 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3737 old_xclient->s = xclient->s; \
3738 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3741 /* minuend -= subtrahend */
3742 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3744 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3747 /* minuend[hi:lo] -= subtrahend */
3748 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3750 SUB_64(m_hi, 0, m_lo, s); \
3753 #define SUB_EXTEND_USTAT(s, t) \
3755 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3756 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3760 * General service functions
3763 static inline long bnx2x_hilo(u32 *hiref)
3765 u32 lo = *(hiref + 1);
3766 #if (BITS_PER_LONG == 64)
3769 return HILO_U64(hi, lo);
3776 * Init service functions
3779 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3781 if (!bp->stats_pending) {
3782 struct eth_query_ramrod_data ramrod_data = {0};
3785 ramrod_data.drv_counter = bp->stats_counter++;
3786 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3787 for_each_queue(bp, i)
3788 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3790 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3791 ((u32 *)&ramrod_data)[1],
3792 ((u32 *)&ramrod_data)[0], 0);
3794 /* stats ramrod has its own slot on the spq */
3796 bp->stats_pending = 1;
3801 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3803 struct dmae_command *dmae = &bp->stats_dmae;
3804 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3806 *stats_comp = DMAE_COMP_VAL;
3807 if (CHIP_REV_IS_SLOW(bp))
3811 if (bp->executer_idx) {
3812 int loader_idx = PMF_DMAE_C(bp);
3814 memset(dmae, 0, sizeof(struct dmae_command));
3816 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3817 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3818 DMAE_CMD_DST_RESET |
3820 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3822 DMAE_CMD_ENDIANITY_DW_SWAP |
3824 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3826 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3827 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3828 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3829 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3830 sizeof(struct dmae_command) *
3831 (loader_idx + 1)) >> 2;
3832 dmae->dst_addr_hi = 0;
3833 dmae->len = sizeof(struct dmae_command) >> 2;
3836 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3837 dmae->comp_addr_hi = 0;
3841 bnx2x_post_dmae(bp, dmae, loader_idx);
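/* Loader pattern, as wired above (a sketch, not an extra spec):
 * this command DMAEs the first prepared sub-command from host
 * memory into DMAE command-memory slot loader_idx + 1, and its
 * completion write to dmae_reg_go_c[loader_idx + 1] starts that
 * slot, chaining the statistics commands without CPU help. */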
3843 } else if (bp->func_stx) {
3845 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3849 static int bnx2x_stats_comp(struct bnx2x *bp)
3851 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3855 while (*stats_comp != DMAE_COMP_VAL) {
3857 BNX2X_ERR("timeout waiting for stats finished\n");
3867 * Statistics service functions
3870 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3872 struct dmae_command *dmae;
3874 int loader_idx = PMF_DMAE_C(bp);
3875 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3878 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3879 BNX2X_ERR("BUG!\n");
3883 bp->executer_idx = 0;
3885 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3887 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3889 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3891 DMAE_CMD_ENDIANITY_DW_SWAP |
3893 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3894 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3896 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3897 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3898 dmae->src_addr_lo = bp->port.port_stx >> 2;
3899 dmae->src_addr_hi = 0;
3900 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3901 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3902 dmae->len = DMAE_LEN32_RD_MAX;
3903 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3904 dmae->comp_addr_hi = 0;
3907 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3909 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3910 dmae->src_addr_hi = 0;
3911 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3912 DMAE_LEN32_RD_MAX * 4);
3913 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3914 DMAE_LEN32_RD_MAX * 4);
3915 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3916 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3917 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3918 dmae->comp_val = DMAE_COMP_VAL;
3921 bnx2x_hw_stats_post(bp);
3922 bnx2x_stats_comp(bp);
3925 static void bnx2x_port_stats_init(struct bnx2x *bp)
3927 struct dmae_command *dmae;
3928 int port = BP_PORT(bp);
3929 int vn = BP_E1HVN(bp);
3931 int loader_idx = PMF_DMAE_C(bp);
3933 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3936 if (!bp->link_vars.link_up || !bp->port.pmf) {
3937 BNX2X_ERR("BUG!\n");
3941 bp->executer_idx = 0;
3944 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3945 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3946 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3948 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3950 DMAE_CMD_ENDIANITY_DW_SWAP |
3952 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3953 (vn << DMAE_CMD_E1HVN_SHIFT));
3955 if (bp->port.port_stx) {
3957 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3958 dmae->opcode = opcode;
3959 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3960 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3961 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3962 dmae->dst_addr_hi = 0;
3963 dmae->len = sizeof(struct host_port_stats) >> 2;
3964 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3965 dmae->comp_addr_hi = 0;
3971 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3972 dmae->opcode = opcode;
3973 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3974 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3975 dmae->dst_addr_lo = bp->func_stx >> 2;
3976 dmae->dst_addr_hi = 0;
3977 dmae->len = sizeof(struct host_func_stats) >> 2;
3978 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3979 dmae->comp_addr_hi = 0;
3984 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3985 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3986 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3988 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3990 DMAE_CMD_ENDIANITY_DW_SWAP |
3992 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3993 (vn << DMAE_CMD_E1HVN_SHIFT));
3995 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3997 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3998 NIG_REG_INGRESS_BMAC0_MEM);
4000 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4001 BIGMAC_REGISTER_TX_STAT_GTBYT */
4002 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4003 dmae->opcode = opcode;
4004 dmae->src_addr_lo = (mac_addr +
4005 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4006 dmae->src_addr_hi = 0;
4007 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4008 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4009 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4010 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4011 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4012 dmae->comp_addr_hi = 0;
4015 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4016 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4017 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4018 dmae->opcode = opcode;
4019 dmae->src_addr_lo = (mac_addr +
4020 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4021 dmae->src_addr_hi = 0;
4022 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4023 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4024 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4025 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4026 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4027 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4029 dmae->comp_addr_hi = 0;
4032 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4034 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4036 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4037 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4038 dmae->opcode = opcode;
4039 dmae->src_addr_lo = (mac_addr +
4040 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4041 dmae->src_addr_hi = 0;
4042 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4043 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4044 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4045 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4046 dmae->comp_addr_hi = 0;
4049 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4050 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4051 dmae->opcode = opcode;
4052 dmae->src_addr_lo = (mac_addr +
4053 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4054 dmae->src_addr_hi = 0;
4055 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4056 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4057 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4058 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4060 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4061 dmae->comp_addr_hi = 0;
4064 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4065 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066 dmae->opcode = opcode;
4067 dmae->src_addr_lo = (mac_addr +
4068 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4069 dmae->src_addr_hi = 0;
4070 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4071 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4072 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4073 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4074 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076 dmae->comp_addr_hi = 0;
4081 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4082 dmae->opcode = opcode;
4083 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4084 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4085 dmae->src_addr_hi = 0;
4086 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4087 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4088 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4089 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4090 dmae->comp_addr_hi = 0;
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = opcode;
4095 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4096 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4097 dmae->src_addr_hi = 0;
4098 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4099 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4100 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4101 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102 dmae->len = (2*sizeof(u32)) >> 2;
4103 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4104 dmae->comp_addr_hi = 0;
4107 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4108 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4109 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4110 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4112 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4114 DMAE_CMD_ENDIANITY_DW_SWAP |
4116 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4117 (vn << DMAE_CMD_E1HVN_SHIFT));
4118 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4119 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4120 dmae->src_addr_hi = 0;
4121 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4122 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4123 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4124 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125 dmae->len = (2*sizeof(u32)) >> 2;
4126 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4127 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4128 dmae->comp_val = DMAE_COMP_VAL;
4133 static void bnx2x_func_stats_init(struct bnx2x *bp)
4135 struct dmae_command *dmae = &bp->stats_dmae;
4136 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4139 if (!bp->func_stx) {
4140 BNX2X_ERR("BUG!\n");
4144 bp->executer_idx = 0;
4145 memset(dmae, 0, sizeof(struct dmae_command));
4147 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4148 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4149 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4151 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4153 DMAE_CMD_ENDIANITY_DW_SWAP |
4155 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4156 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4157 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4158 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4159 dmae->dst_addr_lo = bp->func_stx >> 2;
4160 dmae->dst_addr_hi = 0;
4161 dmae->len = sizeof(struct host_func_stats) >> 2;
4162 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4163 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4164 dmae->comp_val = DMAE_COMP_VAL;
4169 static void bnx2x_stats_start(struct bnx2x *bp)
4172 bnx2x_port_stats_init(bp);
4174 else if (bp->func_stx)
4175 bnx2x_func_stats_init(bp);
4177 bnx2x_hw_stats_post(bp);
4178 bnx2x_storm_stats_post(bp);
4181 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4183 bnx2x_stats_comp(bp);
4184 bnx2x_stats_pmf_update(bp);
4185 bnx2x_stats_start(bp);
4188 static void bnx2x_stats_restart(struct bnx2x *bp)
4190 bnx2x_stats_comp(bp);
4191 bnx2x_stats_start(bp);
4194 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4196 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4197 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4198 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4204 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4205 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4206 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4207 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4208 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4209 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4210 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4211 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4212 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4213 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4214 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4215 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4216 UPDATE_STAT64(tx_stat_gt127,
4217 tx_stat_etherstatspkts65octetsto127octets);
4218 UPDATE_STAT64(tx_stat_gt255,
4219 tx_stat_etherstatspkts128octetsto255octets);
4220 UPDATE_STAT64(tx_stat_gt511,
4221 tx_stat_etherstatspkts256octetsto511octets);
4222 UPDATE_STAT64(tx_stat_gt1023,
4223 tx_stat_etherstatspkts512octetsto1023octets);
4224 UPDATE_STAT64(tx_stat_gt1518,
4225 tx_stat_etherstatspkts1024octetsto1522octets);
4226 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4227 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4228 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4229 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4230 UPDATE_STAT64(tx_stat_gterr,
4231 tx_stat_dot3statsinternalmactransmiterrors);
4232 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4234 estats->pause_frames_received_hi =
4235 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4236 estats->pause_frames_received_lo =
4237 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4239 estats->pause_frames_sent_hi =
4240 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4241 estats->pause_frames_sent_lo =
4242 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4245 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4247 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4248 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4249 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4251 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4252 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4253 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4254 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4255 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4256 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4257 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4258 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4259 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4260 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4261 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4262 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4263 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4264 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4265 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4266 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4267 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4268 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4269 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4270 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4271 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4272 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4273 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4274 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4275 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4276 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4277 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4278 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4280 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4281 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4283 estats->pause_frames_received_hi =
4284 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4285 estats->pause_frames_received_lo =
4286 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4287 ADD_64(estats->pause_frames_received_hi,
4288 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4289 estats->pause_frames_received_lo,
4290 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4292 estats->pause_frames_sent_hi =
4293 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4294 estats->pause_frames_sent_lo =
4295 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4296 ADD_64(estats->pause_frames_sent_hi,
4297 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4298 estats->pause_frames_sent_lo,
4299 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4302 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4304 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4305 struct nig_stats *old = &(bp->port.old_nig_stats);
4306 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4307 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4313 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4314 bnx2x_bmac_stats_update(bp);
4316 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4317 bnx2x_emac_stats_update(bp);
4319 else { /* unreached */
4320 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4324 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4325 new->brb_discard - old->brb_discard);
4326 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4327 new->brb_truncate - old->brb_truncate);
4329 UPDATE_STAT64_NIG(egress_mac_pkt0,
4330 etherstatspkts1024octetsto1522octets);
4331 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4333 memcpy(old, new, sizeof(struct nig_stats));
4335 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4336 sizeof(struct mac_stx));
4337 estats->brb_drop_hi = pstats->brb_drop_hi;
4338 estats->brb_drop_lo = pstats->brb_drop_lo;
4340 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
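/* The start/end pair acts as a snapshot marker: start is set equal
 * to the just-incremented end, so a reader that sees
 * start != end knows an update was in flight (a convention
 * inferred from the increment-then-copy above). */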
4342 if (!BP_NOMCP(bp)) {
4344 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4345 if (nig_timer_max != estats->nig_timer_max) {
4346 estats->nig_timer_max = nig_timer_max;
4347 BNX2X_ERR("NIG timer max (%u)\n",
4348 estats->nig_timer_max);
4355 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4357 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4358 struct tstorm_per_port_stats *tport =
4359 &stats->tstorm_common.port_statistics;
4360 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4361 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4364 memcpy(&(fstats->total_bytes_received_hi),
4365 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4366 sizeof(struct host_func_stats) - 2*sizeof(u32));
4367 estats->error_bytes_received_hi = 0;
4368 estats->error_bytes_received_lo = 0;
4369 estats->etherstatsoverrsizepkts_hi = 0;
4370 estats->etherstatsoverrsizepkts_lo = 0;
4371 estats->no_buff_discard_hi = 0;
4372 estats->no_buff_discard_lo = 0;
4374 for_each_queue(bp, i) {
4375 struct bnx2x_fastpath *fp = &bp->fp[i];
4376 int cl_id = fp->cl_id;
4377 struct tstorm_per_client_stats *tclient =
4378 &stats->tstorm_common.client_statistics[cl_id];
4379 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4380 struct ustorm_per_client_stats *uclient =
4381 &stats->ustorm_common.client_statistics[cl_id];
4382 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4383 struct xstorm_per_client_stats *xclient =
4384 &stats->xstorm_common.client_statistics[cl_id];
4385 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4386 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4387 u32 diff;
4389 /* are storm stats valid? */
4390 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4391 bp->stats_counter) {
4392 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4393 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4394 i, xclient->stats_counter, bp->stats_counter);
4395 return -1;
4396 }
4397 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4398 bp->stats_counter) {
4399 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4400 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4401 i, tclient->stats_counter, bp->stats_counter);
4402 return -2;
4403 }
4404 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4405 bp->stats_counter) {
4406 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4407 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4408 i, uclient->stats_counter, bp->stats_counter);
4409 return -4;
4410 }
4412 qstats->total_bytes_received_hi =
4413 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4414 qstats->total_bytes_received_lo =
4415 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4417 ADD_64(qstats->total_bytes_received_hi,
4418 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4419 qstats->total_bytes_received_lo,
4420 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4422 ADD_64(qstats->total_bytes_received_hi,
4423 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4424 qstats->total_bytes_received_lo,
4425 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4427 SUB_64(qstats->total_bytes_received_hi,
4428 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4429 qstats->total_bytes_received_lo,
4430 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4432 SUB_64(qstats->total_bytes_received_hi,
4433 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4434 qstats->total_bytes_received_lo,
4435 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4437 SUB_64(qstats->total_bytes_received_hi,
4438 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4439 qstats->total_bytes_received_lo,
4440 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4442 qstats->valid_bytes_received_hi =
4443 qstats->total_bytes_received_hi;
4444 qstats->valid_bytes_received_lo =
4445 qstats->total_bytes_received_lo;
4447 qstats->error_bytes_received_hi =
4448 le32_to_cpu(tclient->rcv_error_bytes.hi);
4449 qstats->error_bytes_received_lo =
4450 le32_to_cpu(tclient->rcv_error_bytes.lo);
4452 ADD_64(qstats->total_bytes_received_hi,
4453 qstats->error_bytes_received_hi,
4454 qstats->total_bytes_received_lo,
4455 qstats->error_bytes_received_lo);
4457 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4458 total_unicast_packets_received);
4459 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4460 total_multicast_packets_received);
4461 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4462 total_broadcast_packets_received);
4463 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4464 etherstatsoverrsizepkts);
4465 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4467 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4468 total_unicast_packets_received);
4469 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4470 total_multicast_packets_received);
4471 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4472 total_broadcast_packets_received);
4473 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4474 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4475 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4477 qstats->total_bytes_transmitted_hi =
4478 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4479 qstats->total_bytes_transmitted_lo =
4480 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4482 ADD_64(qstats->total_bytes_transmitted_hi,
4483 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4484 qstats->total_bytes_transmitted_lo,
4485 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4487 ADD_64(qstats->total_bytes_transmitted_hi,
4488 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4489 qstats->total_bytes_transmitted_lo,
4490 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4492 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4493 total_unicast_packets_transmitted);
4494 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4495 total_multicast_packets_transmitted);
4496 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4497 total_broadcast_packets_transmitted);
4499 old_tclient->checksum_discard = tclient->checksum_discard;
4500 old_tclient->ttl0_discard = tclient->ttl0_discard;
4502 ADD_64(fstats->total_bytes_received_hi,
4503 qstats->total_bytes_received_hi,
4504 fstats->total_bytes_received_lo,
4505 qstats->total_bytes_received_lo);
4506 ADD_64(fstats->total_bytes_transmitted_hi,
4507 qstats->total_bytes_transmitted_hi,
4508 fstats->total_bytes_transmitted_lo,
4509 qstats->total_bytes_transmitted_lo);
4510 ADD_64(fstats->total_unicast_packets_received_hi,
4511 qstats->total_unicast_packets_received_hi,
4512 fstats->total_unicast_packets_received_lo,
4513 qstats->total_unicast_packets_received_lo);
4514 ADD_64(fstats->total_multicast_packets_received_hi,
4515 qstats->total_multicast_packets_received_hi,
4516 fstats->total_multicast_packets_received_lo,
4517 qstats->total_multicast_packets_received_lo);
4518 ADD_64(fstats->total_broadcast_packets_received_hi,
4519 qstats->total_broadcast_packets_received_hi,
4520 fstats->total_broadcast_packets_received_lo,
4521 qstats->total_broadcast_packets_received_lo);
4522 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4523 qstats->total_unicast_packets_transmitted_hi,
4524 fstats->total_unicast_packets_transmitted_lo,
4525 qstats->total_unicast_packets_transmitted_lo);
4526 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4527 qstats->total_multicast_packets_transmitted_hi,
4528 fstats->total_multicast_packets_transmitted_lo,
4529 qstats->total_multicast_packets_transmitted_lo);
4530 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4531 qstats->total_broadcast_packets_transmitted_hi,
4532 fstats->total_broadcast_packets_transmitted_lo,
4533 qstats->total_broadcast_packets_transmitted_lo);
4534 ADD_64(fstats->valid_bytes_received_hi,
4535 qstats->valid_bytes_received_hi,
4536 fstats->valid_bytes_received_lo,
4537 qstats->valid_bytes_received_lo);
4539 ADD_64(estats->error_bytes_received_hi,
4540 qstats->error_bytes_received_hi,
4541 estats->error_bytes_received_lo,
4542 qstats->error_bytes_received_lo);
4543 ADD_64(estats->etherstatsoverrsizepkts_hi,
4544 qstats->etherstatsoverrsizepkts_hi,
4545 estats->etherstatsoverrsizepkts_lo,
4546 qstats->etherstatsoverrsizepkts_lo);
4547 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4548 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4549 }
4551 ADD_64(fstats->total_bytes_received_hi,
4552 estats->rx_stat_ifhcinbadoctets_hi,
4553 fstats->total_bytes_received_lo,
4554 estats->rx_stat_ifhcinbadoctets_lo);
4556 memcpy(estats, &(fstats->total_bytes_received_hi),
4557 sizeof(struct host_func_stats) - 2*sizeof(u32));
4559 ADD_64(estats->etherstatsoverrsizepkts_hi,
4560 estats->rx_stat_dot3statsframestoolong_hi,
4561 estats->etherstatsoverrsizepkts_lo,
4562 estats->rx_stat_dot3statsframestoolong_lo);
4563 ADD_64(estats->error_bytes_received_hi,
4564 estats->rx_stat_ifhcinbadoctets_hi,
4565 estats->error_bytes_received_lo,
4566 estats->rx_stat_ifhcinbadoctets_lo);
4568 if (bp->port.pmf) {
4569 estats->mac_filter_discard =
4570 le32_to_cpu(tport->mac_filter_discard);
4571 estats->xxoverflow_discard =
4572 le32_to_cpu(tport->xxoverflow_discard);
4573 estats->brb_truncate_discard =
4574 le32_to_cpu(tport->brb_truncate_discard);
4575 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4576 }
4578 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4580 bp->stats_pending = 0;
4582 return 0;
4583 }
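/*
 * Editor's note (illustrative): the storm firmware stamps each statistics
 * snapshot with a sequence counter.  The checks above treat a per-client
 * snapshot as valid only when (le16_to_cpu(stats_counter) + 1) equals
 * bp->stats_counter, i.e. the snapshot matches the query the driver
 * posted last.  Example: with bp->stats_counter == 7, only snapshots
 * stamped 6 are folded in; stale ones leave the previous totals intact.
 */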
4585 static void bnx2x_net_stats_update(struct bnx2x *bp)
4586 {
4587 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4588 struct net_device_stats *nstats = &bp->dev->stats;
4589 int i;
4591 nstats->rx_packets =
4592 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4593 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4594 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4596 nstats->tx_packets =
4597 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4598 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4599 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4601 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4603 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4605 nstats->rx_dropped = estats->mac_discard;
4606 for_each_queue(bp, i)
4607 nstats->rx_dropped +=
4608 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4610 nstats->tx_dropped = 0;
4612 nstats->multicast =
4613 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4615 nstats->collisions =
4616 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4618 nstats->rx_length_errors =
4619 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4620 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4621 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4622 bnx2x_hilo(&estats->brb_truncate_hi);
4623 nstats->rx_crc_errors =
4624 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4625 nstats->rx_frame_errors =
4626 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4627 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4628 nstats->rx_missed_errors = estats->xxoverflow_discard;
4630 nstats->rx_errors = nstats->rx_length_errors +
4631 nstats->rx_over_errors +
4632 nstats->rx_crc_errors +
4633 nstats->rx_frame_errors +
4634 nstats->rx_fifo_errors +
4635 nstats->rx_missed_errors;
4637 nstats->tx_aborted_errors =
4638 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4639 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4640 nstats->tx_carrier_errors =
4641 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4642 nstats->tx_fifo_errors = 0;
4643 nstats->tx_heartbeat_errors = 0;
4644 nstats->tx_window_errors = 0;
4646 nstats->tx_errors = nstats->tx_aborted_errors +
4647 nstats->tx_carrier_errors +
4648 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4649 }
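/*
 * Editor's note: bnx2x_hilo() above is assumed to fold a hi/lo counter
 * pair into one value, roughly (sketch only, the real helper lives in
 * the driver headers):
 *
 *	u32 hi = *hiref;
 *	u32 lo = *(hiref + 1);
 *	return ((u64)hi << 32) + lo;
 *
 * so e.g. nstats->rx_bytes receives the full 64-bit byte count.
 */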
4651 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4652 {
4653 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4654 int i;
4656 estats->driver_xoff = 0;
4657 estats->rx_err_discard_pkt = 0;
4658 estats->rx_skb_alloc_failed = 0;
4659 estats->hw_csum_err = 0;
4660 for_each_queue(bp, i) {
4661 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4663 estats->driver_xoff += qstats->driver_xoff;
4664 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4665 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4666 estats->hw_csum_err += qstats->hw_csum_err;
4667 }
4668 }
4670 static void bnx2x_stats_update(struct bnx2x *bp)
4671 {
4672 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4674 if (*stats_comp != DMAE_COMP_VAL)
4675 return;
4677 if (bp->port.pmf)
4678 bnx2x_hw_stats_update(bp);
4680 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4681 BNX2X_ERR("storm stats were not updated for 3 times\n");
4682 bnx2x_panic();
4683 return;
4684 }
4686 bnx2x_net_stats_update(bp);
4687 bnx2x_drv_stats_update(bp);
4689 if (netif_msg_timer(bp)) {
4690 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4691 int i;
4693 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4694 bp->dev->name,
4695 estats->brb_drop_lo, estats->brb_truncate_lo);
4697 for_each_queue(bp, i) {
4698 struct bnx2x_fastpath *fp = &bp->fp[i];
4699 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4701 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4702 " rx pkt(%lu) rx calls(%lu %lu)\n",
4703 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4704 fp->rx_comp_cons),
4705 le16_to_cpu(*fp->rx_cons_sb),
4706 bnx2x_hilo(&qstats->
4707 total_unicast_packets_received_hi),
4708 fp->rx_calls, fp->rx_pkt);
4709 }
4711 for_each_queue(bp, i) {
4712 struct bnx2x_fastpath *fp = &bp->fp[i];
4713 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4714 struct netdev_queue *txq =
4715 netdev_get_tx_queue(bp->dev, i);
4717 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4718 " tx pkt(%lu) tx calls (%lu)"
4719 " %s (Xoff events %u)\n",
4720 fp->name, bnx2x_tx_avail(fp),
4721 le16_to_cpu(*fp->tx_cons_sb),
4722 bnx2x_hilo(&qstats->
4723 total_unicast_packets_transmitted_hi),
4724 fp->tx_pkt,
4725 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4726 qstats->driver_xoff);
4727 }
4728 }
4730 bnx2x_hw_stats_post(bp);
4731 bnx2x_storm_stats_post(bp);
4732 }
4734 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4735 {
4736 struct dmae_command *dmae;
4737 u32 opcode;
4738 int loader_idx = PMF_DMAE_C(bp);
4739 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4741 bp->executer_idx = 0;
4743 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4744 DMAE_CMD_C_ENABLE |
4745 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4746 #ifdef __BIG_ENDIAN
4747 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4748 #else
4749 DMAE_CMD_ENDIANITY_DW_SWAP |
4750 #endif
4751 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4752 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4754 if (bp->port.port_stx) {
4756 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4757 if (bp->func_stx)
4758 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4759 else
4760 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4761 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4762 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4763 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4764 dmae->dst_addr_hi = 0;
4765 dmae->len = sizeof(struct host_port_stats) >> 2;
4766 if (bp->func_stx) {
4767 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4768 dmae->comp_addr_hi = 0;
4769 dmae->comp_val = 1;
4770 } else {
4771 dmae->comp_addr_lo =
4772 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4773 dmae->comp_addr_hi =
4774 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4775 dmae->comp_val = DMAE_COMP_VAL;
4777 *stats_comp = 0;
4778 }
4779 }
4781 if (bp->func_stx) {
4783 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4784 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4787 dmae->dst_addr_lo = bp->func_stx >> 2;
4788 dmae->dst_addr_hi = 0;
4789 dmae->len = sizeof(struct host_func_stats) >> 2;
4790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4792 dmae->comp_val = DMAE_COMP_VAL;
4794 *stats_comp = 0;
4795 }
4796 }
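/*
 * Editor's note on the DMAE commands built above (restating the code,
 * not an authoritative hardware spec): each descriptor copies a stats
 * block between host memory and the chip, then signals completion either
 * by kicking the next loader channel (comp_addr_lo = dmae_reg_go_c[...],
 * comp_val = 1) or by writing DMAE_COMP_VAL into the stats_comp word
 * that bnx2x_stats_comp() polls.
 */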
4798 static void bnx2x_stats_stop(struct bnx2x *bp)
4799 {
4800 int update = 0;
4802 bnx2x_stats_comp(bp);
4804 if (bp->port.pmf)
4805 update = (bnx2x_hw_stats_update(bp) == 0);
4807 update |= (bnx2x_storm_stats_update(bp) == 0);
4809 if (update) {
4810 bnx2x_net_stats_update(bp);
4812 if (bp->port.pmf)
4813 bnx2x_port_stats_stop(bp);
4815 bnx2x_hw_stats_post(bp);
4816 bnx2x_stats_comp(bp);
4817 }
4818 }
4820 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4821 {
4822 }
4824 static const struct {
4825 void (*action)(struct bnx2x *bp);
4826 enum bnx2x_stats_state next_state;
4827 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4828 /* state	event */
4829 {
4830 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4831 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4832 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4833 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4834 },
4835 {
4836 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4837 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4838 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4839 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4840 }
4841 };
4843 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4844 {
4845 enum bnx2x_stats_state state = bp->stats_state;
4847 if (unlikely(bp->panic))
4848 return;
4850 bnx2x_stats_stm[state][event].action(bp);
4851 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4853 /* Make sure the state has been "changed" */
4854 smp_wmb();
4856 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4857 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4858 state, event, bp->stats_state);
4859 }
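/*
 * Usage sketch (illustrative): with statistics DISABLED, a LINK_UP event
 * runs bnx2x_stats_start() and moves the machine to STATS_STATE_ENABLED;
 * a later UPDATE event then dispatches bnx2x_stats_update() and stays in
 * ENABLED, exactly as encoded in bnx2x_stats_stm[][] above.
 */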
4861 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4862 {
4863 struct dmae_command *dmae;
4864 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4866 /* sanity */
4867 if (!bp->port.pmf || !bp->port.port_stx) {
4868 BNX2X_ERR("BUG!\n");
4869 return;
4870 }
4872 bp->executer_idx = 0;
4874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4875 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4876 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4878 #ifdef __BIG_ENDIAN
4879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4880 #else
4881 DMAE_CMD_ENDIANITY_DW_SWAP |
4882 #endif
4883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4885 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4886 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4887 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4888 dmae->dst_addr_hi = 0;
4889 dmae->len = sizeof(struct host_port_stats) >> 2;
4890 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4891 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4892 dmae->comp_val = DMAE_COMP_VAL;
4894 *stats_comp = 0;
4895 bnx2x_hw_stats_post(bp);
4896 bnx2x_stats_comp(bp);
4897 }
4899 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4900 {
4901 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4902 int port = BP_PORT(bp);
4903 int func;
4904 u32 func_stx;
4906 /* sanity */
4907 if (!bp->port.pmf || !bp->func_stx) {
4908 BNX2X_ERR("BUG!\n");
4909 return;
4910 }
4912 /* save our func_stx */
4913 func_stx = bp->func_stx;
4915 for (vn = VN_0; vn < vn_max; vn++) {
4916 func = 2*vn + port;
4918 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4919 bnx2x_func_stats_init(bp);
4920 bnx2x_hw_stats_post(bp);
4921 bnx2x_stats_comp(bp);
4922 }
4924 /* restore our func_stx */
4925 bp->func_stx = func_stx;
4926 }
4928 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4929 {
4930 struct dmae_command *dmae = &bp->stats_dmae;
4931 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4933 /* sanity */
4934 if (!bp->func_stx) {
4935 BNX2X_ERR("BUG!\n");
4936 return;
4937 }
4939 bp->executer_idx = 0;
4940 memset(dmae, 0, sizeof(struct dmae_command));
4942 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4943 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4944 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4945 #ifdef __BIG_ENDIAN
4946 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4947 #else
4948 DMAE_CMD_ENDIANITY_DW_SWAP |
4949 #endif
4950 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4951 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4952 dmae->src_addr_lo = bp->func_stx >> 2;
4953 dmae->src_addr_hi = 0;
4954 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4955 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4956 dmae->len = sizeof(struct host_func_stats) >> 2;
4957 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4958 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4959 dmae->comp_val = DMAE_COMP_VAL;
4961 *stats_comp = 0;
4962 bnx2x_hw_stats_post(bp);
4963 bnx2x_stats_comp(bp);
4964 }
4966 static void bnx2x_stats_init(struct bnx2x *bp)
4967 {
4968 int port = BP_PORT(bp);
4969 int func = BP_FUNC(bp);
4970 int i;
4972 bp->stats_pending = 0;
4973 bp->executer_idx = 0;
4974 bp->stats_counter = 0;
4976 /* port and func stats for management */
4977 if (!BP_NOMCP(bp)) {
4978 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4979 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4981 } else {
4982 bp->port.port_stx = 0;
4983 bp->func_stx = 0;
4984 }
4985 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4986 bp->port.port_stx, bp->func_stx);
4989 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4990 bp->port.old_nig_stats.brb_discard =
4991 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4992 bp->port.old_nig_stats.brb_truncate =
4993 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4994 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4995 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4996 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4997 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4999 /* function stats */
5000 for_each_queue(bp, i) {
5001 struct bnx2x_fastpath *fp = &bp->fp[i];
5003 memset(&fp->old_tclient, 0,
5004 sizeof(struct tstorm_per_client_stats));
5005 memset(&fp->old_uclient, 0,
5006 sizeof(struct ustorm_per_client_stats));
5007 memset(&fp->old_xclient, 0,
5008 sizeof(struct xstorm_per_client_stats));
5009 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5010 }
5012 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5013 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5015 bp->stats_state = STATS_STATE_DISABLED;
5017 if (bp->port.pmf) {
5018 if (bp->port.port_stx)
5019 bnx2x_port_stats_base_init(bp);
5021 if (bp->func_stx)
5022 bnx2x_func_stats_base_init(bp);
5024 } else if (bp->func_stx)
5025 bnx2x_func_stats_base_update(bp);
5026 }
5028 static void bnx2x_timer(unsigned long data)
5029 {
5030 struct bnx2x *bp = (struct bnx2x *) data;
5032 if (!netif_running(bp->dev))
5033 return;
5035 if (atomic_read(&bp->intr_sem) != 0)
5036 goto timer_restart;
5038 if (poll) {
5039 struct bnx2x_fastpath *fp = &bp->fp[0];
5040 int rc;
5042 bnx2x_tx_int(fp);
5043 rc = bnx2x_rx_int(fp, 1000);
5044 }
5046 if (!BP_NOMCP(bp)) {
5047 int func = BP_FUNC(bp);
5048 u32 drv_pulse;
5049 u32 mcp_pulse;
5051 ++bp->fw_drv_pulse_wr_seq;
5052 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5053 /* TBD - add SYSTEM_TIME */
5054 drv_pulse = bp->fw_drv_pulse_wr_seq;
5055 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5057 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5058 MCP_PULSE_SEQ_MASK);
5059 /* The delta between driver pulse and mcp response
5060 * should be 1 (before mcp response) or 0 (after mcp response)
5061 */
5062 if ((drv_pulse != mcp_pulse) &&
5063 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5064 /* someone lost a heartbeat... */
5065 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5066 drv_pulse, mcp_pulse);
5067 }
5068 }
5070 if (bp->state == BNX2X_STATE_OPEN)
5071 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5073 timer_restart:
5074 mod_timer(&bp->timer, jiffies + bp->current_interval);
5075 }
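/*
 * Worked example of the heartbeat check above (illustrative): if
 * drv_pulse is 0x0010, the heartbeat is considered healthy when
 * mcp_pulse reads 0x0010 (MCP already answered) or 0x000f (answer still
 * pending, delta of 1).  Any other delta means driver and MCP lost sync
 * and the error above is logged.
 */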
5077 /* end of Statistics */
5081 /*
5082 * nic init service functions
5083 */
5085 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5086 {
5087 int port = BP_PORT(bp);
5090 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5091 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5092 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5093 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5094 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5095 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5096 }
5098 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5099 dma_addr_t mapping, int sb_id)
5100 {
5101 int port = BP_PORT(bp);
5102 int func = BP_FUNC(bp);
5103 int index;
5104 u64 section;
5106 /* USTORM */
5107 section = ((u64)mapping) + offsetof(struct host_status_block,
5108 u_status_block);
5109 sb->u_status_block.status_block_id = sb_id;
5111 REG_WR(bp, BAR_CSTRORM_INTMEM +
5112 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5113 REG_WR(bp, BAR_CSTRORM_INTMEM +
5114 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5115 U64_HI(section));
5116 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5117 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5119 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5120 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5121 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5123 /* CSTORM */
5124 section = ((u64)mapping) + offsetof(struct host_status_block,
5125 c_status_block);
5126 sb->c_status_block.status_block_id = sb_id;
5128 REG_WR(bp, BAR_CSTRORM_INTMEM +
5129 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5130 REG_WR(bp, BAR_CSTRORM_INTMEM +
5131 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5132 U64_HI(section));
5133 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5134 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5136 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5137 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5138 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5140 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5141 }
5143 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5144 {
5145 int func = BP_FUNC(bp);
5147 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5148 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5149 sizeof(struct tstorm_def_status_block)/4);
5150 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5151 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5152 sizeof(struct cstorm_def_status_block_u)/4);
5153 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5154 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5155 sizeof(struct cstorm_def_status_block_c)/4);
5156 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5157 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5158 sizeof(struct xstorm_def_status_block)/4);
5159 }
5161 static void bnx2x_init_def_sb(struct bnx2x *bp,
5162 struct host_def_status_block *def_sb,
5163 dma_addr_t mapping, int sb_id)
5164 {
5165 int port = BP_PORT(bp);
5166 int func = BP_FUNC(bp);
5167 int index, val, reg_offset;
5168 u64 section;
5170 /* ATTN */
5171 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5172 atten_status_block);
5173 def_sb->atten_status_block.status_block_id = sb_id;
5175 bp->attn_state = 0;
5177 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5178 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5180 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5181 bp->attn_group[index].sig[0] = REG_RD(bp,
5182 reg_offset + 0x10*index);
5183 bp->attn_group[index].sig[1] = REG_RD(bp,
5184 reg_offset + 0x4 + 0x10*index);
5185 bp->attn_group[index].sig[2] = REG_RD(bp,
5186 reg_offset + 0x8 + 0x10*index);
5187 bp->attn_group[index].sig[3] = REG_RD(bp,
5188 reg_offset + 0xc + 0x10*index);
5189 }
5191 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5192 HC_REG_ATTN_MSG0_ADDR_L);
5194 REG_WR(bp, reg_offset, U64_LO(section));
5195 REG_WR(bp, reg_offset + 4, U64_HI(section));
5197 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5199 val = REG_RD(bp, reg_offset);
5200 val |= sb_id;
5201 REG_WR(bp, reg_offset, val);
5204 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5205 u_def_status_block);
5206 def_sb->u_def_status_block.status_block_id = sb_id;
5208 REG_WR(bp, BAR_CSTRORM_INTMEM +
5209 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5210 REG_WR(bp, BAR_CSTRORM_INTMEM +
5211 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5212 U64_HI(section));
5213 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5214 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5216 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5217 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5218 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5221 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5222 c_def_status_block);
5223 def_sb->c_def_status_block.status_block_id = sb_id;
5225 REG_WR(bp, BAR_CSTRORM_INTMEM +
5226 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5227 REG_WR(bp, BAR_CSTRORM_INTMEM +
5228 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5229 U64_HI(section));
5230 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5231 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5233 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5235 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5238 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5239 t_def_status_block);
5240 def_sb->t_def_status_block.status_block_id = sb_id;
5242 REG_WR(bp, BAR_TSTRORM_INTMEM +
5243 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5244 REG_WR(bp, BAR_TSTRORM_INTMEM +
5245 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5246 U64_HI(section));
5247 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5248 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5250 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5251 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5252 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5255 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5256 x_def_status_block);
5257 def_sb->x_def_status_block.status_block_id = sb_id;
5259 REG_WR(bp, BAR_XSTRORM_INTMEM +
5260 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5261 REG_WR(bp, BAR_XSTRORM_INTMEM +
5262 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5263 U64_HI(section));
5264 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5265 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5267 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5268 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5269 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5271 bp->stats_pending = 0;
5272 bp->set_mac_pending = 0;
5274 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5275 }
5277 static void bnx2x_update_coalesce(struct bnx2x *bp)
5278 {
5279 int port = BP_PORT(bp);
5280 int i;
5282 for_each_queue(bp, i) {
5283 int sb_id = bp->fp[i].sb_id;
5285 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5286 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5287 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5288 U_SB_ETH_RX_CQ_INDEX),
5289 bp->rx_ticks/(4 * BNX2X_BTR));
5290 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5291 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5292 U_SB_ETH_RX_CQ_INDEX),
5293 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5295 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5296 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5297 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5298 C_SB_ETH_TX_CQ_INDEX),
5299 bp->tx_ticks/(4 * BNX2X_BTR));
5300 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5301 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5302 C_SB_ETH_TX_CQ_INDEX),
5303 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5304 }
5305 }
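/*
 * Worked example for the coalescing values written above (illustrative;
 * the concrete BNX2X_BTR value is defined elsewhere in the driver): if
 * BNX2X_BTR were 4 and rx_ticks were 50 usec, the HC timeout field would
 * get 50 / (4 * 4) = 3 ticks and the matching HC_DISABLE word 0
 * (coalescing enabled); rx_ticks below 16 would yield 0 and disable
 * coalescing for that status-block index instead.
 */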
5307 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5308 struct bnx2x_fastpath *fp, int last)
5309 {
5310 int i;
5312 for (i = 0; i < last; i++) {
5313 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5314 struct sk_buff *skb = rx_buf->skb;
5316 if (skb == NULL) {
5317 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5318 continue;
5319 }
5321 if (fp->tpa_state[i] == BNX2X_TPA_START)
5322 dma_unmap_single(&bp->pdev->dev,
5323 dma_unmap_addr(rx_buf, mapping),
5324 bp->rx_buf_size, DMA_FROM_DEVICE);
5326 dev_kfree_skb(skb);
5327 rx_buf->skb = NULL;
5328 }
5329 }
5331 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5332 {
5333 int func = BP_FUNC(bp);
5334 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5335 ETH_MAX_AGGREGATION_QUEUES_E1H;
5336 u16 ring_prod, cqe_ring_prod;
5337 int i, j;
5339 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5340 DP(NETIF_MSG_IFUP,
5341 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5343 if (bp->flags & TPA_ENABLE_FLAG) {
5345 for_each_queue(bp, j) {
5346 struct bnx2x_fastpath *fp = &bp->fp[j];
5348 for (i = 0; i < max_agg_queues; i++) {
5349 fp->tpa_pool[i].skb =
5350 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5351 if (!fp->tpa_pool[i].skb) {
5352 BNX2X_ERR("Failed to allocate TPA "
5353 "skb pool for queue[%d] - "
5354 "disabling TPA on this "
5355 "queue!\n", j);
5356 bnx2x_free_tpa_pool(bp, fp, i);
5357 fp->disable_tpa = 1;
5358 break;
5359 }
5360 dma_unmap_addr_set((struct sw_rx_bd *)
5361 &bp->fp->tpa_pool[i],
5362 mapping, 0);
5363 fp->tpa_state[i] = BNX2X_TPA_STOP;
5364 }
5365 }
5366 }
5368 for_each_queue(bp, j) {
5369 struct bnx2x_fastpath *fp = &bp->fp[j];
5371 fp->rx_bd_cons = 0;
5372 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5373 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5375 /* "next page" elements initialization */
5376 /* SGE ring */
5377 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5378 struct eth_rx_sge *sge;
5380 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5381 sge->addr_hi =
5382 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5383 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5384 sge->addr_lo =
5385 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5386 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5387 }
5389 bnx2x_init_sge_ring_bit_mask(fp);
5391 /* RX BD ring */
5392 for (i = 1; i <= NUM_RX_RINGS; i++) {
5393 struct eth_rx_bd *rx_bd;
5395 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5396 rx_bd->addr_hi =
5397 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5398 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5399 rx_bd->addr_lo =
5400 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5401 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5402 }
5404 /* CQ ring */
5405 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5406 struct eth_rx_cqe_next_page *nextpg;
5408 nextpg = (struct eth_rx_cqe_next_page *)
5409 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5410 nextpg->addr_hi =
5411 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5412 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5413 nextpg->addr_lo =
5414 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5415 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5416 }
5418 /* Allocate SGEs and initialize the ring elements */
5419 for (i = 0, ring_prod = 0;
5420 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5422 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5423 BNX2X_ERR("was only able to allocate "
5424 "%d rx sges\n", i);
5425 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5426 /* Cleanup already allocated elements */
5427 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5428 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5429 fp->disable_tpa = 1;
5430 ring_prod = 0;
5431 break;
5432 }
5433 ring_prod = NEXT_SGE_IDX(ring_prod);
5434 }
5435 fp->rx_sge_prod = ring_prod;
5437 /* Allocate BDs and initialize BD ring */
5438 fp->rx_comp_cons = 0;
5439 cqe_ring_prod = ring_prod = 0;
5440 for (i = 0; i < bp->rx_ring_size; i++) {
5441 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5442 BNX2X_ERR("was only able to allocate "
5443 "%d rx skbs on queue[%d]\n", i, j);
5444 fp->eth_q_stats.rx_skb_alloc_failed++;
5445 break;
5446 }
5447 ring_prod = NEXT_RX_IDX(ring_prod);
5448 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5449 WARN_ON(ring_prod <= i);
5450 }
5452 fp->rx_bd_prod = ring_prod;
5453 /* must not have more available CQEs than BDs */
5454 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5455 cqe_ring_prod);
5456 fp->rx_pkt = fp->rx_calls = 0;
5458 /* Warning!
5459 * this will generate an interrupt (to the TSTORM)
5460 * must only be done after chip is initialized
5461 */
5462 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5463 fp->rx_sge_prod);
5465 if (j != 0)
5466 continue;
5467 REG_WR(bp, BAR_USTRORM_INTMEM +
5468 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5469 U64_LO(fp->rx_comp_mapping));
5470 REG_WR(bp, BAR_USTRORM_INTMEM +
5471 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5472 U64_HI(fp->rx_comp_mapping));
5473 }
5474 }
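/*
 * Editor's note on the "next page" entries above (restating the
 * arithmetic): the last slots of each ring page hold no buffers, only
 * the DMA address of the following page, e.g. for the RX BD ring
 * iteration i writes
 *
 *	rx_desc_mapping + BCM_PAGE_SIZE * (i % NUM_RX_RINGS)
 *
 * so the final page (i == NUM_RX_RINGS) wraps back to page 0 and the
 * chip can chase the ring without driver help.
 */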
5476 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5477 {
5478 int i, j;
5480 for_each_queue(bp, j) {
5481 struct bnx2x_fastpath *fp = &bp->fp[j];
5483 for (i = 1; i <= NUM_TX_RINGS; i++) {
5484 struct eth_tx_next_bd *tx_next_bd =
5485 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5487 tx_next_bd->addr_hi =
5488 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5489 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5490 tx_next_bd->addr_lo =
5491 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5492 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5493 }
5495 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5496 fp->tx_db.data.zero_fill1 = 0;
5497 fp->tx_db.data.prod = 0;
5499 fp->tx_pkt_prod = 0;
5500 fp->tx_pkt_cons = 0;
5501 fp->tx_bd_prod = 0;
5502 fp->tx_bd_cons = 0;
5503 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5504 fp->tx_pkt = 0;
5505 }
5506 }
5508 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5509 {
5510 int func = BP_FUNC(bp);
5512 spin_lock_init(&bp->spq_lock);
5514 bp->spq_left = MAX_SPQ_PENDING;
5515 bp->spq_prod_idx = 0;
5516 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5517 bp->spq_prod_bd = bp->spq;
5518 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5520 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5521 U64_LO(bp->spq_mapping));
5522 REG_WR(bp,
5523 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5524 U64_HI(bp->spq_mapping));
5526 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5527 bp->spq_prod_idx);
5528 }
5530 static void bnx2x_init_context(struct bnx2x *bp)
5531 {
5532 int i;
5534 /* Rx */
5535 for_each_queue(bp, i) {
5536 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5537 struct bnx2x_fastpath *fp = &bp->fp[i];
5538 u8 cl_id = fp->cl_id;
5540 context->ustorm_st_context.common.sb_index_numbers =
5541 BNX2X_RX_SB_INDEX_NUM;
5542 context->ustorm_st_context.common.clientId = cl_id;
5543 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5544 context->ustorm_st_context.common.flags =
5545 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5546 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5547 context->ustorm_st_context.common.statistics_counter_id =
5548 cl_id;
5549 context->ustorm_st_context.common.mc_alignment_log_size =
5550 BNX2X_RX_ALIGN_SHIFT;
5551 context->ustorm_st_context.common.bd_buff_size =
5552 bp->rx_buf_size;
5553 context->ustorm_st_context.common.bd_page_base_hi =
5554 U64_HI(fp->rx_desc_mapping);
5555 context->ustorm_st_context.common.bd_page_base_lo =
5556 U64_LO(fp->rx_desc_mapping);
5557 if (!fp->disable_tpa) {
5558 context->ustorm_st_context.common.flags |=
5559 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5560 context->ustorm_st_context.common.sge_buff_size =
5561 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5562 0xffff);
5563 context->ustorm_st_context.common.sge_page_base_hi =
5564 U64_HI(fp->rx_sge_mapping);
5565 context->ustorm_st_context.common.sge_page_base_lo =
5566 U64_LO(fp->rx_sge_mapping);
5568 context->ustorm_st_context.common.max_sges_for_packet =
5569 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5570 context->ustorm_st_context.common.max_sges_for_packet =
5571 ((context->ustorm_st_context.common.
5572 max_sges_for_packet + PAGES_PER_SGE - 1) &
5573 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5574 }
5576 context->ustorm_ag_context.cdu_usage =
5577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5578 CDU_REGION_NUMBER_UCM_AG,
5579 ETH_CONNECTION_TYPE);
5581 context->xstorm_ag_context.cdu_reserved =
5582 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5583 CDU_REGION_NUMBER_XCM_AG,
5584 ETH_CONNECTION_TYPE);
5585 }
5587 /* Tx */
5588 for_each_queue(bp, i) {
5589 struct bnx2x_fastpath *fp = &bp->fp[i];
5590 struct eth_context *context =
5591 bnx2x_sp(bp, context[i].eth);
5593 context->cstorm_st_context.sb_index_number =
5594 C_SB_ETH_TX_CQ_INDEX;
5595 context->cstorm_st_context.status_block_id = fp->sb_id;
5597 context->xstorm_st_context.tx_bd_page_base_hi =
5598 U64_HI(fp->tx_desc_mapping);
5599 context->xstorm_st_context.tx_bd_page_base_lo =
5600 U64_LO(fp->tx_desc_mapping);
5601 context->xstorm_st_context.statistics_data = (fp->cl_id |
5602 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5603 }
5604 }
5606 static void bnx2x_init_ind_table(struct bnx2x *bp)
5607 {
5608 int func = BP_FUNC(bp);
5609 int i;
5611 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5612 return;
5614 DP(NETIF_MSG_IFUP,
5615 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5616 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5617 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5618 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5619 bp->fp->cl_id + (i % bp->num_queues));
5620 }
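/*
 * Worked example for the indirection table above (illustrative): with
 * num_queues == 4 and a leading cl_id of 0, the table entries become
 * 0,1,2,3,0,1,2,3,... across all TSTORM_INDIRECTION_TABLE_SIZE slots, so
 * the RSS hash spreads flows round-robin over the four RX clients.
 */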
5622 static void bnx2x_set_client_config(struct bnx2x *bp)
5623 {
5624 struct tstorm_eth_client_config tstorm_client = {0};
5625 int port = BP_PORT(bp);
5626 int i;
5628 tstorm_client.mtu = bp->dev->mtu;
5629 tstorm_client.config_flags =
5630 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5631 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5632 #ifdef BCM_VLAN
5633 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5634 tstorm_client.config_flags |=
5635 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5636 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5637 }
5638 #endif
5640 for_each_queue(bp, i) {
5641 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5643 REG_WR(bp, BAR_TSTRORM_INTMEM +
5644 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5645 ((u32 *)&tstorm_client)[0]);
5646 REG_WR(bp, BAR_TSTRORM_INTMEM +
5647 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5648 ((u32 *)&tstorm_client)[1]);
5649 }
5651 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5652 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5653 }
5655 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5656 {
5657 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5658 int mode = bp->rx_mode;
5659 int mask = bp->rx_mode_cl_mask;
5660 int func = BP_FUNC(bp);
5661 int port = BP_PORT(bp);
5662 int i;
5663 /* All but management unicast packets should pass to the host as well */
5664 u32 llh_mask =
5665 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5666 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5667 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5668 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5670 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5672 switch (mode) {
5673 case BNX2X_RX_MODE_NONE: /* no Rx */
5674 tstorm_mac_filter.ucast_drop_all = mask;
5675 tstorm_mac_filter.mcast_drop_all = mask;
5676 tstorm_mac_filter.bcast_drop_all = mask;
5677 break;
5679 case BNX2X_RX_MODE_NORMAL:
5680 tstorm_mac_filter.bcast_accept_all = mask;
5681 break;
5683 case BNX2X_RX_MODE_ALLMULTI:
5684 tstorm_mac_filter.mcast_accept_all = mask;
5685 tstorm_mac_filter.bcast_accept_all = mask;
5686 break;
5688 case BNX2X_RX_MODE_PROMISC:
5689 tstorm_mac_filter.ucast_accept_all = mask;
5690 tstorm_mac_filter.mcast_accept_all = mask;
5691 tstorm_mac_filter.bcast_accept_all = mask;
5692 /* pass management unicast packets as well */
5693 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5694 break;
5696 default:
5697 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5698 break;
5699 }
5701 REG_WR(bp,
5702 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5703 llh_mask);
5705 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5706 REG_WR(bp, BAR_TSTRORM_INTMEM +
5707 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5708 ((u32 *)&tstorm_mac_filter)[i]);
5710 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5711 ((u32 *)&tstorm_mac_filter)[i]); */
5712 }
5714 if (mode != BNX2X_RX_MODE_NONE)
5715 bnx2x_set_client_config(bp);
5716 }
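/*
 * Illustrative summary of the filtering set up above: each *_drop_all /
 * *_accept_all field carries the client mask, so for PROMISC with
 * rx_mode_cl_mask == 0x1 the TSTORM config ends up as
 * { ucast_accept_all = 0x1, mcast_accept_all = 0x1,
 *   bcast_accept_all = 0x1 }, and the NIG LLH mask additionally lets
 * management unicast frames reach the host.
 */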
5718 static void bnx2x_init_internal_common(struct bnx2x *bp)
5719 {
5720 int i;
5722 /* Zero this manually as its initialization is
5723 currently missing in the initTool */
5724 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5725 REG_WR(bp, BAR_USTRORM_INTMEM +
5726 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5727 }
5729 static void bnx2x_init_internal_port(struct bnx2x *bp)
5730 {
5731 int port = BP_PORT(bp);
5733 REG_WR(bp,
5734 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5735 REG_WR(bp,
5736 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5737 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5738 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5739 }
5741 static void bnx2x_init_internal_func(struct bnx2x *bp)
5742 {
5743 struct tstorm_eth_function_common_config tstorm_config = {0};
5744 struct stats_indication_flags stats_flags = {0};
5745 int port = BP_PORT(bp);
5746 int func = BP_FUNC(bp);
5747 int i, j;
5748 u32 offset;
5749 u16 max_agg_size;
5751 if (is_multi(bp)) {
5752 tstorm_config.config_flags = MULTI_FLAGS(bp);
5753 tstorm_config.rss_result_mask = MULTI_MASK;
5754 }
5756 /* Enable TPA if needed */
5757 if (bp->flags & TPA_ENABLE_FLAG)
5758 tstorm_config.config_flags |=
5759 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5761 if (IS_E1HMF(bp))
5762 tstorm_config.config_flags |=
5763 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5765 tstorm_config.leading_client_id = BP_L_ID(bp);
5767 REG_WR(bp, BAR_TSTRORM_INTMEM +
5768 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5769 (*(u32 *)&tstorm_config));
5771 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5772 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5773 bnx2x_set_storm_rx_mode(bp);
5775 for_each_queue(bp, i) {
5776 u8 cl_id = bp->fp[i].cl_id;
5778 /* reset xstorm per client statistics */
5779 offset = BAR_XSTRORM_INTMEM +
5780 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5781 for (j = 0;
5782 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5783 REG_WR(bp, offset + j*4, 0);
5785 /* reset tstorm per client statistics */
5786 offset = BAR_TSTRORM_INTMEM +
5787 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788 for (j = 0;
5789 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5790 REG_WR(bp, offset + j*4, 0);
5792 /* reset ustorm per client statistics */
5793 offset = BAR_USTRORM_INTMEM +
5794 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795 for (j = 0;
5796 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5797 REG_WR(bp, offset + j*4, 0);
5798 }
5800 /* Init statistics related context */
5801 stats_flags.collect_eth = 1;
5803 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5804 ((u32 *)&stats_flags)[0]);
5805 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5806 ((u32 *)&stats_flags)[1]);
5808 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5809 ((u32 *)&stats_flags)[0]);
5810 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5811 ((u32 *)&stats_flags)[1]);
5813 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5814 ((u32 *)&stats_flags)[0]);
5815 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5816 ((u32 *)&stats_flags)[1]);
5818 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5819 ((u32 *)&stats_flags)[0]);
5820 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5821 ((u32 *)&stats_flags)[1]);
5823 REG_WR(bp, BAR_XSTRORM_INTMEM +
5824 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5825 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5826 REG_WR(bp, BAR_XSTRORM_INTMEM +
5827 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5828 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5830 REG_WR(bp, BAR_TSTRORM_INTMEM +
5831 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833 REG_WR(bp, BAR_TSTRORM_INTMEM +
5834 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5837 REG_WR(bp, BAR_USTRORM_INTMEM +
5838 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840 REG_WR(bp, BAR_USTRORM_INTMEM +
5841 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5844 if (CHIP_IS_E1H(bp)) {
5845 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5846 IS_E1HMF(bp));
5847 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5848 IS_E1HMF(bp));
5849 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5850 IS_E1HMF(bp));
5851 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5852 IS_E1HMF(bp));
5854 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5855 bp->e1hov);
5856 }
5858 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5859 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5860 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5861 for_each_queue(bp, i) {
5862 struct bnx2x_fastpath *fp = &bp->fp[i];
5864 REG_WR(bp, BAR_USTRORM_INTMEM +
5865 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5866 U64_LO(fp->rx_comp_mapping));
5867 REG_WR(bp, BAR_USTRORM_INTMEM +
5868 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5869 U64_HI(fp->rx_comp_mapping));
5871 /* Next page */
5872 REG_WR(bp, BAR_USTRORM_INTMEM +
5873 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5874 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5875 REG_WR(bp, BAR_USTRORM_INTMEM +
5876 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5877 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5879 REG_WR16(bp, BAR_USTRORM_INTMEM +
5880 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5881 max_agg_size);
5882 }
5884 /* dropless flow control */
5885 if (CHIP_IS_E1H(bp)) {
5886 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5888 rx_pause.bd_thr_low = 250;
5889 rx_pause.cqe_thr_low = 250;
5891 rx_pause.sge_thr_low = 0;
5892 rx_pause.bd_thr_high = 350;
5893 rx_pause.cqe_thr_high = 350;
5894 rx_pause.sge_thr_high = 0;
5896 for_each_queue(bp, i) {
5897 struct bnx2x_fastpath *fp = &bp->fp[i];
5899 if (!fp->disable_tpa) {
5900 rx_pause.sge_thr_low = 150;
5901 rx_pause.sge_thr_high = 250;
5902 }
5905 offset = BAR_USTRORM_INTMEM +
5906 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5907 fp->cl_id);
5908 for (j = 0;
5909 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5910 j++)
5911 REG_WR(bp, offset + j*4,
5912 ((u32 *)&rx_pause)[j]);
5913 }
5914 }
5916 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5918 /* Init rate shaping and fairness contexts */
5919 if (IS_E1HMF(bp)) {
5920 int vn;
5922 /* During init there is no active link
5923 Until link is up, set link rate to 10Gbps */
5924 bp->link_vars.line_speed = SPEED_10000;
5925 bnx2x_init_port_minmax(bp);
5927 if (!BP_NOMCP(bp))
5928 bp->mf_config =
5929 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5930 bnx2x_calc_vn_weight_sum(bp);
5932 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5933 bnx2x_init_vn_minmax(bp, 2*vn + port);
5935 /* Enable rate shaping and fairness */
5936 bp->cmng.flags.cmng_enables |=
5937 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5939 } else {
5940 /* rate shaping and fairness are disabled */
5941 DP(NETIF_MSG_IFUP,
5942 "single function mode minmax will be disabled\n");
5943 }
5946 /* Store cmng structures to internal memory */
5947 if (bp->port.pmf)
5948 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5949 REG_WR(bp, BAR_XSTRORM_INTMEM +
5950 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5951 ((u32 *)(&bp->cmng))[i]);
5952 }
5954 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5955 {
5956 switch (load_code) {
5957 case FW_MSG_CODE_DRV_LOAD_COMMON:
5958 bnx2x_init_internal_common(bp);
5959 /* no break */
5961 case FW_MSG_CODE_DRV_LOAD_PORT:
5962 bnx2x_init_internal_port(bp);
5963 /* no break */
5965 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5966 bnx2x_init_internal_func(bp);
5967 break;
5969 default:
5970 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5971 break;
5972 }
5973 }
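/*
 * Editor's note: the cases above deliberately cascade - a COMMON load
 * performs common + port + function init, a PORT load performs port +
 * function init, and a FUNCTION load only the last step; the "no break"
 * comments mark the intentional fall-through.
 */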
5975 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5976 {
5977 int i;
5979 for_each_queue(bp, i) {
5980 struct bnx2x_fastpath *fp = &bp->fp[i];
5983 fp->state = BNX2X_FP_STATE_CLOSED;
5984 fp->index = i;
5985 fp->cl_id = BP_L_ID(bp) + i;
5986 #ifdef BCM_CNIC
5987 fp->sb_id = fp->cl_id + 1;
5988 #else
5989 fp->sb_id = fp->cl_id;
5990 #endif
5991 DP(NETIF_MSG_IFUP,
5992 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5993 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5994 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5995 fp->sb_id);
5996 bnx2x_update_fpsb_idx(fp);
5997 }
5999 /* ensure status block indices were read */
6000 rmb();
6003 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6004 DEF_SB_ID);
6005 bnx2x_update_dsb_idx(bp);
6006 bnx2x_update_coalesce(bp);
6007 bnx2x_init_rx_rings(bp);
6008 bnx2x_init_tx_ring(bp);
6009 bnx2x_init_sp_ring(bp);
6010 bnx2x_init_context(bp);
6011 bnx2x_init_internal(bp, load_code);
6012 bnx2x_init_ind_table(bp);
6013 bnx2x_stats_init(bp);
6015 /* At this point, we are ready for interrupts */
6016 atomic_set(&bp->intr_sem, 0);
6018 /* flush all before enabling interrupts */
6019 mb();
6020 mmiowb();
6022 bnx2x_int_enable(bp);
6024 /* Check for SPIO5 */
6025 bnx2x_attn_int_deasserted0(bp,
6026 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6027 AEU_INPUTS_ATTN_BITS_SPIO5);
6028 }
6030 /* end of nic init */
6032 /*
6033 * gzip service functions
6034 */
6036 static int bnx2x_gunzip_init(struct bnx2x *bp)
6037 {
6038 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6039 &bp->gunzip_mapping, GFP_KERNEL);
6040 if (bp->gunzip_buf == NULL)
6041 goto gunzip_nomem1;
6043 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6044 if (bp->strm == NULL)
6045 goto gunzip_nomem2;
6047 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6048 GFP_KERNEL);
6049 if (bp->strm->workspace == NULL)
6050 goto gunzip_nomem3;
6052 return 0;
6054 gunzip_nomem3:
6055 kfree(bp->strm);
6056 bp->strm = NULL;
6058 gunzip_nomem2:
6059 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6060 bp->gunzip_mapping);
6061 bp->gunzip_buf = NULL;
6063 gunzip_nomem1:
6064 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6065 " un-compression\n");
6066 return -ENOMEM;
6067 }
6069 static void bnx2x_gunzip_end(struct bnx2x *bp)
6070 {
6071 kfree(bp->strm->workspace);
6073 kfree(bp->strm);
6074 bp->strm = NULL;
6076 if (bp->gunzip_buf) {
6077 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6078 bp->gunzip_mapping);
6079 bp->gunzip_buf = NULL;
6080 }
6081 }
6083 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6084 {
6085 int n, rc;
6087 /* check gzip header */
6088 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6089 BNX2X_ERR("Bad gzip header\n");
6090 return -EINVAL;
6091 }
6093 n = 10;
6095 #define FNAME 0x8
6097 if (zbuf[3] & FNAME)
6098 while ((zbuf[n++] != 0) && (n < len));
6100 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6101 bp->strm->avail_in = len - n;
6102 bp->strm->next_out = bp->gunzip_buf;
6103 bp->strm->avail_out = FW_BUF_SIZE;
6105 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6106 if (rc != Z_OK)
6107 return rc;
6109 rc = zlib_inflate(bp->strm, Z_FINISH);
6110 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6111 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6112 bp->strm->msg);
6114 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6115 if (bp->gunzip_outlen & 0x3)
6116 netdev_err(bp->dev, "Firmware decompression error:"
6117 " gunzip_outlen (%d) not aligned\n",
6119 bp->gunzip_outlen >>= 2;
6121 zlib_inflateEnd(bp->strm);
6123 if (rc == Z_STREAM_END)
6124 return 0;
6126 return rc;
6127 }
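/*
 * Illustrative walk-through of the header handling above: a gzip stream
 * begins 0x1f 0x8b 0x08 <flags>, with a 10-byte fixed header, hence
 * n = 10.  If the FNAME flag (bit 3 of zbuf[3]) is set, the
 * NUL-terminated original file name is skipped as well before the raw
 * deflate payload is handed to zlib_inflate() with -MAX_WBITS (no zlib
 * wrapper).
 */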
6129 /* nic load/unload */
6131 /*
6132 * General service functions
6133 */
6135 /* send a NIG loopback debug packet */
6136 static void bnx2x_lb_pckt(struct bnx2x *bp)
6137 {
6138 u32 wb_write[3];
6140 /* Ethernet source and destination addresses */
6141 wb_write[0] = 0x55555555;
6142 wb_write[1] = 0x55555555;
6143 wb_write[2] = 0x20; /* SOP */
6144 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6146 /* NON-IP protocol */
6147 wb_write[0] = 0x09000000;
6148 wb_write[1] = 0x55555555;
6149 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6150 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6151 }
6153 /* some of the internal memories
6154 * are not directly readable from the driver
6155 * to test them we send debug packets
6156 */
6157 static int bnx2x_int_mem_test(struct bnx2x *bp)
6158 {
6159 int factor;
6160 int count, i;
6161 u32 val = 0;
6163 if (CHIP_REV_IS_FPGA(bp))
6164 factor = 120;
6165 else if (CHIP_REV_IS_EMUL(bp))
6166 factor = 200;
6167 else
6168 factor = 1;
6170 DP(NETIF_MSG_HW, "start part1\n");
6172 /* Disable inputs of parser neighbor blocks */
6173 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6174 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6175 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6176 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6178 /* Write 0 to parser credits for CFC search request */
6179 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6181 /* send Ethernet packet */
6182 bnx2x_lb_pckt(bp);
6184 /* TODO do i reset NIG statistic? */
6185 /* Wait until NIG register shows 1 packet of size 0x10 */
6186 count = 1000 * factor;
6187 while (count) {
6189 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6190 val = *bnx2x_sp(bp, wb_data[0]);
6191 if (val == 0x10)
6192 break;
6194 msleep(10);
6195 count--;
6196 }
6197 if (val != 0x10) {
6198 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6199 return -1;
6200 }
6202 /* Wait until PRS register shows 1 packet */
6203 count = 1000 * factor;
6204 while (count) {
6205 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6206 if (val == 1)
6207 break;
6209 msleep(10);
6210 count--;
6211 }
6212 if (val != 0x1) {
6213 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6214 return -2;
6215 }
6217 /* Reset and init BRB, PRS */
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6219 msleep(50);
6220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6221 msleep(50);
6222 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6223 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6225 DP(NETIF_MSG_HW, "part2\n");
6227 /* Disable inputs of parser neighbor blocks */
6228 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6229 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6230 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6231 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6233 /* Write 0 to parser credits for CFC search request */
6234 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6236 /* send 10 Ethernet packets */
6237 for (i = 0; i < 10; i++)
6238 bnx2x_lb_pckt(bp);
6240 /* Wait until NIG register shows 10 + 1
6241 packets of size 11*0x10 = 0xb0 */
6242 count = 1000 * factor;
6243 while (count) {
6245 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6246 val = *bnx2x_sp(bp, wb_data[0]);
6247 if (val == 0xb0)
6248 break;
6250 msleep(10);
6251 count--;
6252 }
6253 if (val != 0xb0) {
6254 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6255 return -3;
6256 }
6258 /* Wait until PRS register shows 2 packets */
6259 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6260 if (val != 2)
6261 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6263 /* Write 1 to parser credits for CFC search request */
6264 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6266 /* Wait until PRS register shows 3 packets */
6267 msleep(10 * factor);
6268 /* Wait until NIG register shows 1 packet of size 0x10 */
6269 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6270 if (val != 3)
6271 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6273 /* clear NIG EOP FIFO */
6274 for (i = 0; i < 11; i++)
6275 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6276 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6277 if (val != 1) {
6278 BNX2X_ERR("clear of NIG failed\n");
6279 return -4;
6280 }
6282 /* Reset and init BRB, PRS, NIG */
6283 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6284 msleep(50);
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6286 msleep(50);
6287 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6288 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6289 #ifndef BCM_CNIC
6290 /* set NIC mode */
6291 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6292 #endif
6294 /* Enable inputs of parser neighbor blocks */
6295 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6296 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6297 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6298 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6300 DP(NETIF_MSG_HW, "done\n");
6302 return 0;
6303 }
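/*
 * Editor's summary of the self-test above (restating the code): part 1
 * sends one loopback packet with CFC search credits held at 0 and
 * expects it parked in the BRB (NIG octet count 0x10, PRS count 1);
 * part 2 sends ten more, restores one credit, and verifies the counters
 * advance and the NIG EOP FIFO drains.  Each negative return value
 * identifies which wait timed out.
 */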
6305 static void enable_blocks_attention(struct bnx2x *bp)
6306 {
6307 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6308 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6309 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6310 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6311 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6312 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6313 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6314 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6315 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6316 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6317 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6318 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6319 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6320 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6321 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6322 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6323 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6324 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6325 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6326 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6327 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6328 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6329 if (CHIP_REV_IS_FPGA(bp))
6330 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6332 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6333 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6334 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6335 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6336 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6337 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6338 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6339 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6340 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6341 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6342 }
6344 static const struct {
6345 u32 addr;
6346 u32 mask;
6347 } bnx2x_parity_mask[] = {
6348 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6349 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6350 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6351 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6352 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6353 {QM_REG_QM_PRTY_MASK, 0x0},
6354 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6355 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6356 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6357 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6358 {CDU_REG_CDU_PRTY_MASK, 0x0},
6359 {CFC_REG_CFC_PRTY_MASK, 0x0},
6360 {DBG_REG_DBG_PRTY_MASK, 0x0},
6361 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6362 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6363 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6364 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6365 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6366 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6367 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6369 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6370 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6371 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6372 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6373 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6374 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6375 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6378 static void enable_blocks_parity(struct bnx2x *bp)
6380 int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6383 for (i = 0; i < mask_arr_len; i++)
6384 REG_WR(bp, bnx2x_parity_mask[i].addr,
6385 bnx2x_parity_mask[i].mask);
6389 static void bnx2x_reset_common(struct bnx2x *bp)
6392 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6397 static void bnx2x_init_pxp(struct bnx2x *bp)
6400 int r_order, w_order;
6402 pci_read_config_word(bp->pdev,
6403 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6404 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6405 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6407 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
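/* Illustrative example: the PCIe DEVCTL payload/read-request fields encode
 * a size of 128 << field, so a READRQ field of 2 extracted above means a
 * 512-byte maximum read request. */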
6409 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6413 bnx2x_init_pxp_arb(bp, r_order, w_order);
6416 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6426 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6427 SHARED_HW_CFG_FAN_FAILURE_MASK;
6429 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6433 * The fan failure mechanism is usually related to the PHY type since
6434 * the power consumption of the board is affected by the PHY. Currently,
6435 * a fan is required for most designs with the SFX7101, BCM8727 and BCM8481.
6437 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6438 for (port = PORT_0; port < PORT_MAX; port++) {
6440 SHMEM_RD(bp, dev_info.port_hw_config[port].
6441 external_phy_config) &
6442 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6445 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6447 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6449 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6452 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6454 if (is_required == 0)
6457 /* Fan failure is indicated by SPIO 5 */
6458 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6459 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6461 /* set to active low mode */
6462 val = REG_RD(bp, MISC_REG_SPIO_INT);
6463 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6464 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6465 REG_WR(bp, MISC_REG_SPIO_INT, val);
6467 /* enable interrupt to signal the IGU */
6468 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6469 val |= (1 << MISC_REGISTERS_SPIO_5);
6470 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6473 static int bnx2x_init_common(struct bnx2x *bp)
6480 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6482 bnx2x_reset_common(bp);
6483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6484 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6486 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6487 if (CHIP_IS_E1H(bp))
6488 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6490 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6492 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6494 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6495 if (CHIP_IS_E1(bp)) {
6496 /* enable HW interrupt from PXP on USDM overflow
6497 bit 16 on INT_MASK_0 */
6498 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6501 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6505 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6506 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6507 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6508 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6509 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6510 /* make sure this value is 0 */
6511 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6513 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6514 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6515 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6516 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6517 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6520 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6522 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6523 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6524 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6527 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6528 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6530 /* let the HW do its magic ... */
6532 /* finish PXP init */
6533 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6535 BNX2X_ERR("PXP2 CFG failed\n");
6538 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6540 BNX2X_ERR("PXP2 RD_INIT failed\n");
6544 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6545 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6547 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6549 /* clean the DMAE memory */
6551 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6553 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6554 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6555 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6556 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6558 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6559 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6560 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6561 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6563 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6568 for (i = 0; i < 64; i++) {
6569 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6570 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6572 if (CHIP_IS_E1H(bp)) {
6573 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6574 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6579 /* soft reset pulse */
6580 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6581 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6584 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6587 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6588 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6589 if (!CHIP_REV_IS_SLOW(bp)) {
6590 /* enable hw interrupt from doorbell Q */
6591 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6594 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6595 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6596 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6599 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6601 if (CHIP_IS_E1H(bp))
6602 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6604 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6605 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6606 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6607 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6609 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6610 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6611 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6612 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6614 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6615 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6617 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6620 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6625 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6627 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6629 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6630 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6631 REG_WR(bp, i, 0xc0cac01a);
6632 /* TODO: replace with something meaningful */
6634 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6636 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6637 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6638 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6639 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6640 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6641 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6642 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6643 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6644 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6645 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6647 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6649 if (sizeof(union cdu_context) != 1024)
6650 /* we currently assume that a context is 1024 bytes */
6651 dev_alert(&bp->pdev->dev, "please adjust the size "
6652 "of cdu_context(%ld)\n",
6653 (long)sizeof(union cdu_context));
6655 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6656 val = (4 << 24) + (0 << 12) + 1024;
6657 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6659 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6660 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6661 /* enable context validation interrupt from CFC */
6662 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6664 /* set the thresholds to prevent CFC/CDU race */
6665 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6667 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6668 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6670 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6671 /* Reset PCIE errors for debug */
6672 REG_WR(bp, 0x2814, 0xffffffff);
6673 REG_WR(bp, 0x3820, 0xffffffff);
6675 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6676 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6677 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6678 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6680 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6681 if (CHIP_IS_E1H(bp)) {
6682 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6683 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6686 if (CHIP_REV_IS_SLOW(bp))
6689 /* finish CFC init */
6690 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6692 BNX2X_ERR("CFC LL_INIT failed\n");
6695 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6697 BNX2X_ERR("CFC AC_INIT failed\n");
6700 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6702 BNX2X_ERR("CFC CAM_INIT failed\n");
6705 REG_WR(bp, CFC_REG_DEBUG0, 0);
6707 /* read NIG statistic
6708 to see if this is our first up since powerup */
6709 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6710 val = *bnx2x_sp(bp, wb_data[0]);
6712 /* do internal memory self test */
6713 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6714 BNX2X_ERR("internal mem self test failed\n");
6718 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6723 bp->port.need_hw_lock = 1;
6730 bnx2x_setup_fan_failure_detection(bp);
6732 /* clear PXP2 attentions */
6733 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6735 enable_blocks_attention(bp);
6736 if (CHIP_PARITY_SUPPORTED(bp))
6737 enable_blocks_parity(bp);
6739 if (!BP_NOMCP(bp)) {
6740 bnx2x_acquire_phy_lock(bp);
6741 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6742 bnx2x_release_phy_lock(bp);
6744 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6749 static int bnx2x_init_port(struct bnx2x *bp)
6751 int port = BP_PORT(bp);
6752 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6756 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6758 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6760 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6761 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6763 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6764 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6765 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6766 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6769 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6771 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6772 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6773 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6776 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6778 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6779 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6780 /* no pause for emulation and FPGA */
6785 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6786 else if (bp->dev->mtu > 4096) {
6787 if (bp->flags & ONE_PORT_FLAG)
6791 /* (24*1024 + val*4)/256 */
6792 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6795 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6796 high = low + 56; /* 14*1024/256 */
6798 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6799 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
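/* Judging by the arithmetic in the comments above, both thresholds are
 * expressed in 256-byte BRB blocks (e.g. high = low + 56 adds 14KB). */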
6802 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6804 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6805 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6806 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6807 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6809 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6810 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6811 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6812 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6814 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6815 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6817 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6819 /* configure PBF to work without PAUSE, MTU 9000 */
6820 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6822 /* update threshold */
6823 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6824 /* update init credit */
6825 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6828 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6830 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6833 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6835 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6836 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6838 if (CHIP_IS_E1(bp)) {
6839 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6840 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6842 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6844 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6845 /* init aeu_mask_attn_func_0/1:
6846 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6847 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6848 * bits 4-7 are used for "per vn group attention" */
6849 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6850 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6852 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6853 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6854 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6855 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6856 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6858 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6860 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6862 if (CHIP_IS_E1H(bp)) {
6863 /* 0x2 disable e1hov, 0x1 enable */
6864 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6865 (IS_E1HMF(bp) ? 0x1 : 0x2));
6868 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6869 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6870 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6874 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6875 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6880 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6882 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6883 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6885 /* The GPIO should be swapped if the swap register is
6886 set and active */
6887 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6888 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6890 /* Select function upon port-swap configuration */
6892 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6893 aeu_gpio_mask = (swap_val && swap_override) ?
6894 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6895 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6897 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6898 aeu_gpio_mask = (swap_val && swap_override) ?
6899 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6900 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6902 val = REG_RD(bp, offset);
6903 /* add GPIO3 to group */
6904 val |= aeu_gpio_mask;
6905 REG_WR(bp, offset, val);
6909 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6911 /* add SPIO 5 to group 0 */
6913 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6915 val = REG_RD(bp, reg_addr);
6916 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6917 REG_WR(bp, reg_addr, val);
6925 bnx2x__link_reset(bp);
6930 #define ILT_PER_FUNC (768/2)
6931 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6932 /* the phys address is shifted right 12 bits and a 1=valid bit is
6933 added at the 53rd bit;
6934 then, since this is a wide register(TM),
6935 we split it into two 32-bit writes */
6937 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6938 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
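/* Illustrative example (not from the original source): for a DMA address
 * of 0x1234567000, ONCHIP_ADDR1() yields 0x01234567 (address bits 12..43)
 * and ONCHIP_ADDR2() yields 0x00100000 (the valid bit alone, since
 * address bits 44 and up are zero here). */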
6939 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6940 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
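/* An ILT range is packed into a single register value: the first line in
 * the low 10 bits and the last line starting at bit 10; PXP_ONE_ILT() is
 * simply the degenerate range where first == last. */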
6942 #ifdef BCM_CNIC
6943 #define CNIC_ILT_LINES 127
6944 #define CNIC_CTX_PER_ILT 16
6945 #else
6946 #define CNIC_ILT_LINES 0
6947 #endif
6949 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6953 if (CHIP_IS_E1H(bp))
6954 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6956 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6958 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6961 static int bnx2x_init_func(struct bnx2x *bp)
6963 int port = BP_PORT(bp);
6964 int func = BP_FUNC(bp);
6968 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6970 /* set MSI reconfigure capability */
6971 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6972 val = REG_RD(bp, addr);
6973 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6974 REG_WR(bp, addr, val);
6976 i = FUNC_ILT_BASE(func);
6978 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6979 if (CHIP_IS_E1H(bp)) {
6980 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6981 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6983 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6984 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6987 i += 1 + CNIC_ILT_LINES;
6988 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6990 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6992 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6993 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6997 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6999 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7001 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7002 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7006 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7008 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7010 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7011 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7014 /* tell the searcher where the T2 table is */
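/* The 16KB T2 table holds 16*1024/64 = 256 entries of 64 bytes each;
 * FIRSTFREE points at the table base and LASTFREE at the final entry
 * (base + 16K - 64), matching the writes below. */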
7015 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7017 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7018 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7020 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7021 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7022 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7024 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7027 if (CHIP_IS_E1H(bp)) {
7028 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7029 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7030 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7031 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7032 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7033 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7034 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7035 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7036 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7038 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7039 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7042 /* HC init per function */
7043 if (CHIP_IS_E1H(bp)) {
7044 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7046 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7047 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7049 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7051 /* Reset PCIE errors for debug */
7052 REG_WR(bp, 0x2114, 0xffffffff);
7053 REG_WR(bp, 0x2120, 0xffffffff);
7058 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7062 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7063 BP_FUNC(bp), load_code);
7066 mutex_init(&bp->dmae_mutex);
7067 rc = bnx2x_gunzip_init(bp);
7071 switch (load_code) {
7072 case FW_MSG_CODE_DRV_LOAD_COMMON:
7073 rc = bnx2x_init_common(bp);
7078 case FW_MSG_CODE_DRV_LOAD_PORT:
7080 rc = bnx2x_init_port(bp);
7085 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7087 rc = bnx2x_init_func(bp);
7093 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7097 if (!BP_NOMCP(bp)) {
7098 int func = BP_FUNC(bp);
7100 bp->fw_drv_pulse_wr_seq =
7101 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7102 DRV_PULSE_SEQ_MASK);
7103 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7106 /* this needs to be done before gunzip end */
7107 bnx2x_zero_def_sb(bp);
7108 for_each_queue(bp, i)
7109 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7110 #ifdef BCM_CNIC
7111 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7112 #endif
7115 bnx2x_gunzip_end(bp);
7120 static void bnx2x_free_mem(struct bnx2x *bp)
7123 #define BNX2X_PCI_FREE(x, y, size) \
7126 dma_free_coherent(&bp->pdev->dev, size, x, y); \
7132 #define BNX2X_FREE(x) \
7144 for_each_queue(bp, i) {
7147 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7148 bnx2x_fp(bp, i, status_blk_mapping),
7149 sizeof(struct host_status_block));
7152 for_each_queue(bp, i) {
7154 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7155 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7156 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7157 bnx2x_fp(bp, i, rx_desc_mapping),
7158 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7160 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7161 bnx2x_fp(bp, i, rx_comp_mapping),
7162 sizeof(struct eth_fast_path_rx_cqe) *
7166 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7167 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7168 bnx2x_fp(bp, i, rx_sge_mapping),
7169 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7172 for_each_queue(bp, i) {
7174 /* fastpath tx rings: tx_buf tx_desc */
7175 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7176 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7177 bnx2x_fp(bp, i, tx_desc_mapping),
7178 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7180 /* end of fastpath */
7182 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7183 sizeof(struct host_def_status_block));
7185 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7186 sizeof(struct bnx2x_slowpath));
7189 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7190 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7191 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7192 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7193 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7194 sizeof(struct host_status_block));
7196 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7198 #undef BNX2X_PCI_FREE
7202 static int bnx2x_alloc_mem(struct bnx2x *bp)
7205 #define BNX2X_PCI_ALLOC(x, y, size) \
7207 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7209 goto alloc_mem_err; \
7210 memset(x, 0, size); \
7213 #define BNX2X_ALLOC(x, size) \
7215 x = vmalloc(size); \
7217 goto alloc_mem_err; \
7218 memset(x, 0, size); \
7225 for_each_queue(bp, i) {
7226 bnx2x_fp(bp, i, bp) = bp;
7229 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7230 &bnx2x_fp(bp, i, status_blk_mapping),
7231 sizeof(struct host_status_block));
7234 for_each_queue(bp, i) {
7236 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7237 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7238 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7239 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7240 &bnx2x_fp(bp, i, rx_desc_mapping),
7241 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7243 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7244 &bnx2x_fp(bp, i, rx_comp_mapping),
7245 sizeof(struct eth_fast_path_rx_cqe) *
7249 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7250 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7251 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7252 &bnx2x_fp(bp, i, rx_sge_mapping),
7253 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7256 for_each_queue(bp, i) {
7258 /* fastpath tx rings: tx_buf tx_desc */
7259 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7260 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7261 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7262 &bnx2x_fp(bp, i, tx_desc_mapping),
7263 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7265 /* end of fastpath */
7267 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7268 sizeof(struct host_def_status_block));
7270 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7271 sizeof(struct bnx2x_slowpath));
7274 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7276 /* allocate the searcher T2 table;
7277 we allocate 1/4 of the allocation count for T2
7278 (which is not entered into the ILT) */
7279 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7281 /* Initialize T2 (for 1024 connections) */
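/* Each 64-byte T2 element is chained to the next one by storing the
 * physical address of element i+1 in the last 8 bytes (offset 56) of
 * element i, forming the searcher's free list. */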
7282 for (i = 0; i < 16*1024; i += 64)
7283 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7285 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7286 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7288 /* QM queues (128*MAX_CONN) */
7289 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7291 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7292 sizeof(struct host_status_block));
7295 /* Slow path ring */
7296 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7304 #undef BNX2X_PCI_ALLOC
7308 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7312 for_each_queue(bp, i) {
7313 struct bnx2x_fastpath *fp = &bp->fp[i];
7315 u16 bd_cons = fp->tx_bd_cons;
7316 u16 sw_prod = fp->tx_pkt_prod;
7317 u16 sw_cons = fp->tx_pkt_cons;
7319 while (sw_cons != sw_prod) {
7320 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7326 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7330 for_each_queue(bp, j) {
7331 struct bnx2x_fastpath *fp = &bp->fp[j];
7333 for (i = 0; i < NUM_RX_BD; i++) {
7334 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7335 struct sk_buff *skb = rx_buf->skb;
7340 dma_unmap_single(&bp->pdev->dev,
7341 dma_unmap_addr(rx_buf, mapping),
7342 bp->rx_buf_size, DMA_FROM_DEVICE);
7347 if (!fp->disable_tpa)
7348 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7349 ETH_MAX_AGGREGATION_QUEUES_E1 :
7350 ETH_MAX_AGGREGATION_QUEUES_E1H);
7354 static void bnx2x_free_skbs(struct bnx2x *bp)
7356 bnx2x_free_tx_skbs(bp);
7357 bnx2x_free_rx_skbs(bp);
7360 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7364 free_irq(bp->msix_table[0].vector, bp->dev);
7365 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7366 bp->msix_table[0].vector);
7371 for_each_queue(bp, i) {
7372 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7373 "state %x\n", i, bp->msix_table[i + offset].vector,
7374 bnx2x_fp(bp, i, state));
7376 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7380 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7382 if (bp->flags & USING_MSIX_FLAG) {
7384 bnx2x_free_msix_irqs(bp);
7385 pci_disable_msix(bp->pdev);
7386 bp->flags &= ~USING_MSIX_FLAG;
7388 } else if (bp->flags & USING_MSI_FLAG) {
7390 free_irq(bp->pdev->irq, bp->dev);
7391 pci_disable_msi(bp->pdev);
7392 bp->flags &= ~USING_MSI_FLAG;
7394 } else if (!disable_only)
7395 free_irq(bp->pdev->irq, bp->dev);
7398 static int bnx2x_enable_msix(struct bnx2x *bp)
7400 int i, rc, offset = 1;
7403 bp->msix_table[0].entry = igu_vec;
7404 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7407 igu_vec = BP_L_ID(bp) + offset;
7408 bp->msix_table[1].entry = igu_vec;
7409 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7412 for_each_queue(bp, i) {
7413 igu_vec = BP_L_ID(bp) + offset + i;
7414 bp->msix_table[i + offset].entry = igu_vec;
7415 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7416 "(fastpath #%u)\n", i + offset, igu_vec, i);
7419 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7420 BNX2X_NUM_QUEUES(bp) + offset);
7422 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7426 bp->flags |= USING_MSIX_FLAG;
7431 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7433 int i, rc, offset = 1;
7435 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7436 bp->dev->name, bp->dev);
7438 BNX2X_ERR("request sp irq failed\n");
7445 for_each_queue(bp, i) {
7446 struct bnx2x_fastpath *fp = &bp->fp[i];
7447 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7450 rc = request_irq(bp->msix_table[i + offset].vector,
7451 bnx2x_msix_fp_int, 0, fp->name, fp);
7453 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7454 bnx2x_free_msix_irqs(bp);
7458 fp->state = BNX2X_FP_STATE_IRQ;
7461 i = BNX2X_NUM_QUEUES(bp);
7462 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7464 bp->msix_table[0].vector,
7465 0, bp->msix_table[offset].vector,
7466 i - 1, bp->msix_table[offset + i - 1].vector);
7471 static int bnx2x_enable_msi(struct bnx2x *bp)
7475 rc = pci_enable_msi(bp->pdev);
7477 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7480 bp->flags |= USING_MSI_FLAG;
7485 static int bnx2x_req_irq(struct bnx2x *bp)
7487 unsigned long flags;
7490 if (bp->flags & USING_MSI_FLAG)
7493 flags = IRQF_SHARED;
7495 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7496 bp->dev->name, bp->dev);
7498 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7503 static void bnx2x_napi_enable(struct bnx2x *bp)
7507 for_each_queue(bp, i)
7508 napi_enable(&bnx2x_fp(bp, i, napi));
7511 static void bnx2x_napi_disable(struct bnx2x *bp)
7515 for_each_queue(bp, i)
7516 napi_disable(&bnx2x_fp(bp, i, napi));
7519 static void bnx2x_netif_start(struct bnx2x *bp)
7523 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7524 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7527 if (netif_running(bp->dev)) {
7528 bnx2x_napi_enable(bp);
7529 bnx2x_int_enable(bp);
7530 if (bp->state == BNX2X_STATE_OPEN)
7531 netif_tx_wake_all_queues(bp->dev);
7536 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7538 bnx2x_int_disable_sync(bp, disable_hw);
7539 bnx2x_napi_disable(bp);
7540 netif_tx_disable(bp->dev);
7544 * Init service functions
7548 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7550 * @param bp driver descriptor
7551 * @param set set or clear an entry (1 or 0)
7552 * @param mac pointer to a buffer containing a MAC
7553 * @param cl_bit_vec bit vector of clients to register a MAC for
7554 * @param cam_offset offset in a CAM to use
7555 * @param with_bcast set broadcast MAC as well
7557 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7558 u32 cl_bit_vec, u8 cam_offset,
7561 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7562 int port = BP_PORT(bp);
7565 * unicasts 0-31:port0 32-63:port1
7566 * multicast 64-127:port0 128-191:port1
7568 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7569 config->hdr.offset = cam_offset;
7570 config->hdr.client_id = 0xff;
7571 config->hdr.reserved1 = 0;
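/* The CAM stores the MAC as three 16-bit words; on a little-endian host
 * the swab16() calls below leave e.g. msb_mac_addr = (mac[0] << 8) | mac[1],
 * i.e. the bytes in network (big-endian) order. */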
7574 config->config_table[0].cam_entry.msb_mac_addr =
7575 swab16(*(u16 *)&mac[0]);
7576 config->config_table[0].cam_entry.middle_mac_addr =
7577 swab16(*(u16 *)&mac[2]);
7578 config->config_table[0].cam_entry.lsb_mac_addr =
7579 swab16(*(u16 *)&mac[4]);
7580 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7582 config->config_table[0].target_table_entry.flags = 0;
7584 CAM_INVALIDATE(config->config_table[0]);
7585 config->config_table[0].target_table_entry.clients_bit_vector =
7586 cpu_to_le32(cl_bit_vec);
7587 config->config_table[0].target_table_entry.vlan_id = 0;
7589 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7590 (set ? "setting" : "clearing"),
7591 config->config_table[0].cam_entry.msb_mac_addr,
7592 config->config_table[0].cam_entry.middle_mac_addr,
7593 config->config_table[0].cam_entry.lsb_mac_addr);
7597 config->config_table[1].cam_entry.msb_mac_addr =
7598 cpu_to_le16(0xffff);
7599 config->config_table[1].cam_entry.middle_mac_addr =
7600 cpu_to_le16(0xffff);
7601 config->config_table[1].cam_entry.lsb_mac_addr =
7602 cpu_to_le16(0xffff);
7603 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7605 config->config_table[1].target_table_entry.flags =
7606 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7608 CAM_INVALIDATE(config->config_table[1]);
7609 config->config_table[1].target_table_entry.clients_bit_vector =
7610 cpu_to_le32(cl_bit_vec);
7611 config->config_table[1].target_table_entry.vlan_id = 0;
7614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7615 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7616 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7620 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7622 * @param bp driver descriptor
7623 * @param set set or clear an entry (1 or 0)
7624 * @param mac pointer to a buffer containing a MAC
7625 * @param cl_bit_vec bit vector of clients to register a MAC for
7626 * @param cam_offset offset in a CAM to use
7628 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7629 u32 cl_bit_vec, u8 cam_offset)
7631 struct mac_configuration_cmd_e1h *config =
7632 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7634 config->hdr.length = 1;
7635 config->hdr.offset = cam_offset;
7636 config->hdr.client_id = 0xff;
7637 config->hdr.reserved1 = 0;
7640 config->config_table[0].msb_mac_addr =
7641 swab16(*(u16 *)&mac[0]);
7642 config->config_table[0].middle_mac_addr =
7643 swab16(*(u16 *)&mac[2]);
7644 config->config_table[0].lsb_mac_addr =
7645 swab16(*(u16 *)&mac[4]);
7646 config->config_table[0].clients_bit_vector =
7647 cpu_to_le32(cl_bit_vec);
7648 config->config_table[0].vlan_id = 0;
7649 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7651 config->config_table[0].flags = BP_PORT(bp);
7653 config->config_table[0].flags =
7654 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7656 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7657 (set ? "setting" : "clearing"),
7658 config->config_table[0].msb_mac_addr,
7659 config->config_table[0].middle_mac_addr,
7660 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7662 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7663 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7664 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7667 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7668 int *state_p, int poll)
7670 /* can take a while if any port is running */
7673 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7674 poll ? "polling" : "waiting", state, idx);
7679 bnx2x_rx_int(bp->fp, 10);
7680 /* if index is different from 0
7681 * the reply for some commands will
7682 * be on the non default queue
7685 bnx2x_rx_int(&bp->fp[idx], 10);
7688 mb(); /* state is changed by bnx2x_sp_event() */
7689 if (*state_p == state) {
7690 #ifdef BNX2X_STOP_ON_ERROR
7691 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7703 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7704 poll ? "polling" : "waiting", state, idx);
7705 #ifdef BNX2X_STOP_ON_ERROR
7712 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7714 bp->set_mac_pending++;
7717 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7718 (1 << bp->fp->cl_id), BP_FUNC(bp));
7720 /* Wait for a completion */
7721 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7724 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7726 bp->set_mac_pending++;
7729 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7730 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7733 /* Wait for a completion */
7734 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7739 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7740 * MAC(s). This function will wait until the ramrod completion
7741 * returns.
7743 * @param bp driver handle
7744 * @param set set or clear the CAM entry
7746 * @return 0 if success, -ENODEV if the ramrod doesn't return.
7748 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7750 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7752 bp->set_mac_pending++;
7755 /* Send a SET_MAC ramrod */
7757 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7758 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7761 /* CAM allocation for E1H
7762 * unicasts: by func number
7763 * multicast: 20+FUNC*20, 20 each
7765 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7766 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7768 /* Wait for a completion when setting */
7769 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7775 static int bnx2x_setup_leading(struct bnx2x *bp)
7779 /* reset IGU state */
7780 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7783 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7785 /* Wait for completion */
7786 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7791 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7793 struct bnx2x_fastpath *fp = &bp->fp[index];
7795 /* reset IGU state */
7796 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7799 fp->state = BNX2X_FP_STATE_OPENING;
7800 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7803 /* Wait for completion */
7804 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7808 static int bnx2x_poll(struct napi_struct *napi, int budget);
7810 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7813 switch (bp->multi_mode) {
7814 case ETH_RSS_MODE_DISABLED:
7818 case ETH_RSS_MODE_REGULAR:
7820 bp->num_queues = min_t(u32, num_queues,
7821 BNX2X_MAX_QUEUES(bp));
7823 bp->num_queues = min_t(u32, num_online_cpus(),
7824 BNX2X_MAX_QUEUES(bp));
7834 static int bnx2x_set_num_queues(struct bnx2x *bp)
7842 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7847 /* Set number of queues according to bp->multi_mode value */
7848 bnx2x_set_num_queues_msix(bp);
7850 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7853 /* if we can't use MSI-X we only need one fp,
7854 * so try to enable MSI-X with the requested number of fp's
7855 * and fallback to MSI or legacy INTx with one fp
7857 rc = bnx2x_enable_msix(bp);
7859 /* failed to enable MSI-X */
7863 bp->dev->real_num_tx_queues = bp->num_queues;
7868 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7869 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7872 /* must be called with rtnl_lock */
7873 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7878 #ifdef BNX2X_STOP_ON_ERROR
7879 if (unlikely(bp->panic))
7883 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7885 rc = bnx2x_set_num_queues(bp);
7887 if (bnx2x_alloc_mem(bp)) {
7888 bnx2x_free_irq(bp, true);
7892 for_each_queue(bp, i)
7893 bnx2x_fp(bp, i, disable_tpa) =
7894 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7896 for_each_queue(bp, i)
7897 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7900 bnx2x_napi_enable(bp);
7902 if (bp->flags & USING_MSIX_FLAG) {
7903 rc = bnx2x_req_msix_irqs(bp);
7905 bnx2x_free_irq(bp, true);
7909 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7910 memory (in bnx2x_set_num_queues()) */
7911 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7912 bnx2x_enable_msi(bp);
7914 rc = bnx2x_req_irq(bp);
7916 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7917 bnx2x_free_irq(bp, true);
7920 if (bp->flags & USING_MSI_FLAG) {
7921 bp->dev->irq = bp->pdev->irq;
7922 netdev_info(bp->dev, "using MSI IRQ %d\n",
7927 /* Send LOAD_REQUEST command to MCP
7928 Returns the type of LOAD command:
7929 if it is the first port to be initialized,
7930 common blocks should be initialized; otherwise not
7932 if (!BP_NOMCP(bp)) {
7933 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7935 BNX2X_ERR("MCP response failure, aborting\n");
7939 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7940 rc = -EBUSY; /* other port in diagnostic mode */
7945 int port = BP_PORT(bp);
7947 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7948 load_count[0], load_count[1], load_count[2]);
7950 load_count[1 + port]++;
7951 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7952 load_count[0], load_count[1], load_count[2]);
7953 if (load_count[0] == 1)
7954 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7955 else if (load_count[1 + port] == 1)
7956 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7958 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7961 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7962 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7966 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7969 rc = bnx2x_init_hw(bp, load_code);
7971 BNX2X_ERR("HW init failed, aborting\n");
7972 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7973 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7974 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7978 /* Setup NIC internals and enable interrupts */
7979 bnx2x_nic_init(bp, load_code);
7981 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7982 (bp->common.shmem2_base))
7983 SHMEM2_WR(bp, dcc_support,
7984 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7985 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7987 /* Send LOAD_DONE command to MCP */
7988 if (!BP_NOMCP(bp)) {
7989 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7991 BNX2X_ERR("MCP response failure, aborting\n");
7997 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7999 rc = bnx2x_setup_leading(bp);
8001 BNX2X_ERR("Setup leading failed!\n");
8002 #ifndef BNX2X_STOP_ON_ERROR
8010 if (CHIP_IS_E1H(bp))
8011 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8012 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8013 bp->flags |= MF_FUNC_DIS;
8016 if (bp->state == BNX2X_STATE_OPEN) {
8018 /* Enable Timer scan */
8019 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8021 for_each_nondefault_queue(bp, i) {
8022 rc = bnx2x_setup_multi(bp, i);
8032 bnx2x_set_eth_mac_addr_e1(bp, 1);
8034 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8036 /* Set iSCSI L2 MAC */
8037 mutex_lock(&bp->cnic_mutex);
8038 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8039 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8040 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8041 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8044 mutex_unlock(&bp->cnic_mutex);
8049 bnx2x_initial_phy_init(bp, load_mode);
8051 /* Start fast path */
8052 switch (load_mode) {
8054 if (bp->state == BNX2X_STATE_OPEN) {
8055 /* Tx queues should only be re-enabled */
8056 netif_tx_wake_all_queues(bp->dev);
8058 /* Initialize the receive filter. */
8059 bnx2x_set_rx_mode(bp->dev);
8063 netif_tx_start_all_queues(bp->dev);
8064 if (bp->state != BNX2X_STATE_OPEN)
8065 netif_tx_disable(bp->dev);
8066 /* Initialize the receive filter. */
8067 bnx2x_set_rx_mode(bp->dev);
8071 /* Initialize the receive filter. */
8072 bnx2x_set_rx_mode(bp->dev);
8073 bp->state = BNX2X_STATE_DIAG;
8081 bnx2x__link_status_update(bp);
8083 /* start the timer */
8084 mod_timer(&bp->timer, jiffies + bp->current_interval);
8087 bnx2x_setup_cnic_irq_info(bp);
8088 if (bp->state == BNX2X_STATE_OPEN)
8089 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8091 bnx2x_inc_load_cnt(bp);
8097 /* Disable Timer scan */
8098 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8101 bnx2x_int_disable_sync(bp, 1);
8102 if (!BP_NOMCP(bp)) {
8103 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8104 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8107 /* Free SKBs, SGEs, TPA pool and driver internals */
8108 bnx2x_free_skbs(bp);
8109 for_each_queue(bp, i)
8110 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8113 bnx2x_free_irq(bp, false);
8115 bnx2x_napi_disable(bp);
8116 for_each_queue(bp, i)
8117 netif_napi_del(&bnx2x_fp(bp, i, napi));
8123 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8125 struct bnx2x_fastpath *fp = &bp->fp[index];
8128 /* halt the connection */
8129 fp->state = BNX2X_FP_STATE_HALTING;
8130 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8132 /* Wait for completion */
8133 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8135 if (rc) /* timeout */
8138 /* delete cfc entry */
8139 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8141 /* Wait for completion */
8142 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8147 static int bnx2x_stop_leading(struct bnx2x *bp)
8149 __le16 dsb_sp_prod_idx;
8150 /* if the other port is handling traffic,
8151 this can take a lot of time */
8157 /* Send HALT ramrod */
8158 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8159 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8161 /* Wait for completion */
8162 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8163 &(bp->fp[0].state), 1);
8164 if (rc) /* timeout */
8167 dsb_sp_prod_idx = *bp->dsb_sp_prod;
8169 /* Send PORT_DELETE ramrod */
8170 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8172 /* Wait for completion to arrive on the default status block;
8173 we are going to reset the chip anyway,
8174 so there is not much to do if this times out
8176 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8178 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8179 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8180 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8181 #ifdef BNX2X_STOP_ON_ERROR
8189 rmb(); /* Refresh the dsb_sp_prod */
8191 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8192 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8197 static void bnx2x_reset_func(struct bnx2x *bp)
8199 int port = BP_PORT(bp);
8200 int func = BP_FUNC(bp);
8204 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8205 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8208 /* Disable Timer scan */
8209 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8211 * Wait for at least 10ms and up to 2 seconds for the timers scan to
8214 for (i = 0; i < 200; i++) {
8216 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8221 base = FUNC_ILT_BASE(func);
8222 for (i = base; i < base + ILT_PER_FUNC; i++)
8223 bnx2x_ilt_wr(bp, i, 0);
8226 static void bnx2x_reset_port(struct bnx2x *bp)
8228 int port = BP_PORT(bp);
8231 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8233 /* Do not rcv packets to BRB */
8234 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8235 /* Do not direct rcv packets that are not for MCP to the BRB */
8236 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8237 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8240 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8243 /* Check for BRB port occupancy */
8244 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8246 DP(NETIF_MSG_IFDOWN,
8247 "BRB1 is not empty %d blocks are occupied\n", val);
8249 /* TODO: Close Doorbell port? */
8252 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8254 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8255 BP_FUNC(bp), reset_code);
8257 switch (reset_code) {
8258 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8259 bnx2x_reset_port(bp);
8260 bnx2x_reset_func(bp);
8261 bnx2x_reset_common(bp);
8264 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8265 bnx2x_reset_port(bp);
8266 bnx2x_reset_func(bp);
8269 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8270 bnx2x_reset_func(bp);
8274 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8279 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8281 int port = BP_PORT(bp);
8285 /* Wait until tx fastpath tasks complete */
8286 for_each_queue(bp, i) {
8287 struct bnx2x_fastpath *fp = &bp->fp[i];
8290 while (bnx2x_has_tx_work_unload(fp)) {
8294 BNX2X_ERR("timeout waiting for queue[%d]\n",
8296 #ifdef BNX2X_STOP_ON_ERROR
8307 /* Give HW time to discard old tx messages */
8310 if (CHIP_IS_E1(bp)) {
8311 struct mac_configuration_cmd *config =
8312 bnx2x_sp(bp, mcast_config);
8314 bnx2x_set_eth_mac_addr_e1(bp, 0);
8316 for (i = 0; i < config->hdr.length; i++)
8317 CAM_INVALIDATE(config->config_table[i]);
8319 config->hdr.length = i;
8320 if (CHIP_REV_IS_SLOW(bp))
8321 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8323 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8324 config->hdr.client_id = bp->fp->cl_id;
8325 config->hdr.reserved1 = 0;
8327 bp->set_mac_pending++;
8330 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8331 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8332 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8335 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8337 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8339 for (i = 0; i < MC_HASH_SIZE; i++)
8340 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8342 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8345 /* Clear iSCSI L2 MAC */
8346 mutex_lock(&bp->cnic_mutex);
8347 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8348 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8349 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8351 mutex_unlock(&bp->cnic_mutex);
8354 if (unload_mode == UNLOAD_NORMAL)
8355 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8357 else if (bp->flags & NO_WOL_FLAG)
8358 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8361 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8362 u8 *mac_addr = bp->dev->dev_addr;
8364 /* The MAC address is written to entries 1-4 to
8365 preserve entry 0 which is used by the PMF */
8366 u8 entry = (BP_E1HVN(bp) + 1)*8;
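/* Each EMAC_REG_EMAC_MAC_MATCH CAM entry is 8 bytes wide (two 32-bit
 * halves are written below), so (BP_E1HVN(bp) + 1) * 8 addresses entries
 * 1..4 for VNs 0..3, leaving entry 0 untouched as noted above. */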
8368 val = (mac_addr[0] << 8) | mac_addr[1];
8369 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8371 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8372 (mac_addr[4] << 8) | mac_addr[5];
8373 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8375 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8378 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8380 /* Close multi and leading connections
8381 Completions for ramrods are collected in a synchronous way */
8382 for_each_nondefault_queue(bp, i)
8383 if (bnx2x_stop_multi(bp, i))
8386 rc = bnx2x_stop_leading(bp);
8388 BNX2X_ERR("Stop leading failed!\n");
8389 #ifdef BNX2X_STOP_ON_ERROR
8398 reset_code = bnx2x_fw_command(bp, reset_code);
8400 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8401 load_count[0], load_count[1], load_count[2]);
8403 load_count[1 + port]--;
8404 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8405 load_count[0], load_count[1], load_count[2]);
8406 if (load_count[0] == 0)
8407 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8408 else if (load_count[1 + port] == 0)
8409 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8411 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8414 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8415 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8416 bnx2x__link_reset(bp);
8418 /* Reset the chip */
8419 bnx2x_reset_chip(bp, reset_code);
8421 /* Report UNLOAD_DONE to MCP */
8423 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8427 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8431 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8433 if (CHIP_IS_E1(bp)) {
8434 int port = BP_PORT(bp);
8435 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8436 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8438 val = REG_RD(bp, addr);
8439 val &= ~(0x300);
8440 REG_WR(bp, addr, val);
8441 } else if (CHIP_IS_E1H(bp)) {
8442 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8443 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8444 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8445 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8449 /* must be called with rtnl_lock */
8450 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8454 if (bp->state == BNX2X_STATE_CLOSED) {
8455 /* Interface has been removed - nothing to recover */
8456 bp->recovery_state = BNX2X_RECOVERY_DONE;
8458 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8465 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8467 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8469 /* Set "drop all" */
8470 bp->rx_mode = BNX2X_RX_MODE_NONE;
8471 bnx2x_set_storm_rx_mode(bp);
8473 /* Disable HW interrupts, NAPI and Tx */
8474 bnx2x_netif_stop(bp, 1);
8476 del_timer_sync(&bp->timer);
8477 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8478 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8479 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8482 bnx2x_free_irq(bp, false);
8484 /* Cleanup the chip if needed */
8485 if (unload_mode != UNLOAD_RECOVERY)
8486 bnx2x_chip_cleanup(bp, unload_mode);
8490 /* Free SKBs, SGEs, TPA pool and driver internals */
8491 bnx2x_free_skbs(bp);
8492 for_each_queue(bp, i)
8493 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8494 for_each_queue(bp, i)
8495 netif_napi_del(&bnx2x_fp(bp, i, napi));
8498 bp->state = BNX2X_STATE_CLOSED;
8500 netif_carrier_off(bp->dev);
8502 /* The last driver must disable a "close the gate" if there is no
8503 * parity attention or "process kill" pending.
8505 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8506 bnx2x_reset_is_done(bp))
8507 bnx2x_disable_close_the_gate(bp);
8509 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8510 if (unload_mode == UNLOAD_RECOVERY)
8516 /* Close gates #2, #3 and #4: */
8517 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8521 /* Gates #2 and #4a are closed/opened for "not E1" only */
8522 if (!CHIP_IS_E1(bp)) {
8524 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8525 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8526 close ? (val | 0x1) : (val & (~(u32)1)));
8528 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8529 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8530 close ? (val | 0x1) : (val & (~(u32)1)));
8534 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8535 val = REG_RD(bp, addr);
8536 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8538 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8539 close ? "closing" : "opening");
8543 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
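/* The `magic' bit is set in the shared MF config before an MCP reset so
 * that the MF configuration survives the reset; bnx2x_clp_reset_done()
 * later restores whatever value the caller saved via *magic_val. */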
8545 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8547 /* Do some magic... */
8548 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8549 *magic_val = val & SHARED_MF_CLP_MAGIC;
8550 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8553 /* Restore the value of the `magic' bit.
8555 * @param bp Driver handle.
8556 * @param magic_val Old value of the `magic' bit.
8558 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8560 /* Restore the `magic' bit value... */
8561 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8562 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8563 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8564 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8565 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8566 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8569 /* Prepares for MCP reset: takes care of CLP configurations.
8572 * @param magic_val Old value of 'magic' bit.
8574 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8577 u32 validity_offset;
8579 DP(NETIF_MSG_HW, "Starting\n");
8581 /* Set `magic' bit in order to save MF config */
8582 if (!CHIP_IS_E1(bp))
8583 bnx2x_clp_reset_prep(bp, magic_val);
8585 /* Get shmem offset */
8586 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8587 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8589 /* Clear validity map flags */
8591 REG_WR(bp, shmem + validity_offset, 0);
8594 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8595 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
8597 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8598 * depending on the HW type.
8602 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8604 /* special handling for emulation and FPGA,
8605 wait 10 times longer */
8606 if (CHIP_REV_IS_SLOW(bp))
8607 msleep(MCP_ONE_TIMEOUT*10);
8609 msleep(MCP_ONE_TIMEOUT);
8612 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8614 u32 shmem, cnt, validity_offset, val;
8619 /* Get shmem offset */
8620 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8622 BNX2X_ERR("Shmem 0 return failure\n");
8627 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8629 /* Wait for MCP to come up */
8630 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8631 /* TBD: it's best to check the validity map of the last port.
8632 * currently checks on port 0.
8634 val = REG_RD(bp, shmem + validity_offset);
8635 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8636 shmem + validity_offset, val);
8638 /* check that shared memory is valid. */
8639 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8640 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8643 bnx2x_mcp_wait_one(bp);
8646 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8648 /* Check that shared memory is valid. This indicates that MCP is up. */
8649 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8650 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8651 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8657 /* Restore the `magic' bit value */
8658 if (!CHIP_IS_E1(bp))
8659 bnx2x_clp_reset_done(bp, magic_val);
8664 static void bnx2x_pxp_prep(struct bnx2x *bp)
8666 if (!CHIP_IS_E1(bp)) {
8667 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8668 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8669 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8675 * Reset the whole chip except for:
8677 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8680 * - MISC (including AEU)
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
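/* A note on the SET/CLEAR register pairs used above, sketched under the
 * assumption (consistent with how this file uses them) that each bit of the
 * underlying reset register holds its block in reset while the bit is 0:
 *
 *	// put every block whose mask bit is 1 into reset:
 *	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, mask);
 *	// take those same blocks back out of reset:
 *	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, mask);
 *
 * Bits outside the written mask are left untouched, which is why the code
 * above can keep HC/PXP/PXPV (not_reset_mask1) out of the reset sweep.
 */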
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still"
		   " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* TBD: Add resetting the NO_MCP mode DB here */

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
						  HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								      PCI_D3hot);
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
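/* In short, the recovery state machine above moves each function through
 * BNX2X_RECOVERY_INIT -> BNX2X_RECOVERY_WAIT -> BNX2X_RECOVERY_DONE: every
 * function unloads itself in INIT, the leader (whoever grabbed
 * HW_LOCK_RESOURCE_RESERVED_08) waits in WAIT until the load counter drops
 * to zero and then performs the "process kill", while non-leaders poll
 * bnx2x_reset_is_done() before reloading themselves.
 */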
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
/* end of nic load/unload */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}
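/* A minimal usage sketch of the pretend mechanism (mirroring
 * bnx2x_undi_int_disable_e1h() below): writing a function index into this
 * per-function GRC register makes subsequent accesses behave as if issued
 * by that function, and each write must be flushed by reading it back
 * before it can be relied upon:
 *
 *	u32 reg = bnx2x_get_pretend_reg(bp, BP_FUNC(bp));
 *
 *	REG_WR(bp, reg, 0);		// start pretending to be function 0
 *	(void)REG_RD(bp, reg);		// flush the GRC transaction
 *	...				// do "like-E1" accesses here
 *	REG_WR(bp, reg, BP_FUNC(bp));	// stop pretending
 */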
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
	    != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
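/* A worked example of the chip_id layout assembled above (num:16-31,
 * rev:12-15, metal:4-11, bond_id:0-3): a hypothetical chip_id of 0x164e0001
 * would decode as chip number (id >> 16) = 0x164e, revision
 * (id >> 12) & 0xf = 0, metal (id >> 4) & 0xff = 0 and bond_id
 * id & 0xf = 0x1 -- and that bond_id LSB is exactly what the single-port
 * check above keys off.
 */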
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
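/* For example, an NVRAM speed_cap_mask with only
 * PORT_HW_CFG_SPEED_CAPABILITY_D0_1G and ..._D0_10G set leaves a
 * DIRECT-attached XGXS port advertising just SUPPORTED_1000baseT_Full and
 * SUPPORTED_10000baseT_Full among the speed bits -- the pause/autoneg bits
 * are not touched by the mask.
 */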
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERROR("NVRAM config error. "
			    "BAD link speed link_config 0x%x\n",
			    bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
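/* Worked example: the shmem stores a MAC as two CPU-order words, e.g.
 * mac_upper = 0x0010 and mac_lower = 0x18311292.  After the byte swaps
 * above, mac_buf holds 00:10:18:31:12:92 -- the wire-order (big-endian)
 * station address.
 */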
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
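/* The granularity rounding above snaps each coalescing value down to a
 * multiple of 4 * BNX2X_BTR usec, which is the step the HW timer can
 * actually represent.  For instance, if the step works out to 16 usec, the
 * requested 50 usec Tx value rounds to (50/16)*16 = 48 usec and the 25 usec
 * Rx value to (25/16)*16 = 16 usec.
 */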
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
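/* Example of the multi-function clamping above: a vn whose
 * FUNC_MF_CFG_MAX_BW field is 30 is entitled to 30 * 100 = 3000 Mbps, so
 * even with the physical link at 10000, ethtool reports speed 3000.
 */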
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
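/* In other words, the register sizes are accumulated in 32-bit words (hence
 * the final *= 4 before adding the byte-sized dump_hdr), and a wide-bus
 * entry contributes size * (1 + read_regs_count) words because every
 * register in it drags read_regs_count companion reads along with it.
 */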
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
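/* A minimal sketch of a caller, assuming a valid dword-aligned region (the
 * same constraints the checks above enforce): reading 8 bytes issues two
 * dword commands, the first flagged MCPR_NVM_COMMAND_FIRST and the second
 * MCPR_NVM_COMMAND_LAST:
 *
 *	u8 buf[8];
 *	int rc = bnx2x_nvram_read(bp, 0x100, buf, sizeof(buf));
 *	if (rc == 0)
 *		...	// buf[] now holds the flash bytes in device order
 */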
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
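/* Worked example of the single-byte read-modify-write above: for
 * offset = 0x102, align_offset = 0x100 and BYTE_OFFSET(0x102) = 8*2 = 16,
 * so the new byte is spliced into bits 23:16 of the dword read back from
 * 0x100 while the other three bytes round-trip unchanged.
 */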
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
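/* Example of the FIRST/LAST flagging above, assuming NVRAM_PAGE_SIZE = 256:
 * a 512-byte write at offset 0x100 issues dwords 0x100..0x1fc, marks 0x1fc
 * LAST (the next dword would cross into the following page), marks 0x200
 * FIRST again, and finally marks 0x2fc LAST because it is also the last
 * dword of the buffer.
 */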
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
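/* The tx_pending lower bound above guards the worst case for one packet:
 * a maximally-fragmented skb consumes MAX_SKB_FRAGS data BDs plus a few
 * extra descriptors (start/parsing BDs and the like), so a ring of
 * MAX_SKB_FRAGS + 4 entries or fewer could never accept even a single such
 * frame.
 */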
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
11031 static const struct {
11032 char string[ETH_GSTRING_LEN];
11033 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11034 { "register_test (offline)" },
11035 { "memory_test (offline)" },
11036 { "loopback_test (offline)" },
11037 { "nvram_test (online)" },
11038 { "interrupt_test (online)" },
11039 { "link_test (online)" },
11040 { "idle check (online)" }
11043 static int bnx2x_test_registers(struct bnx2x *bp)
11045 int idx, i, rc = -ENODEV;
11047 int port = BP_PORT(bp);
11048 static const struct {
11053 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11054 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11055 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11056 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11057 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11058 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11059 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11060 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11061 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11062 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11063 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11064 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11065 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11066 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11067 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11068 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11069 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11070 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
11071 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
11072 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11073 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
11074 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11075 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11076 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11077 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11078 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11079 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11080 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11081 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
11082 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11083 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
11084 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11085 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11086 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11087 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11088 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11089 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11091 { 0xffffffff, 0, 0x00000000 }
11094 if (!netif_running(bp->dev))
11097 /* Repeat the test twice:
11098 first by writing 0x00000000, then by writing 0xffffffff */
11099 for (idx = 0; idx < 2; idx++) {
11106 wr_val = 0xffffffff;
11110 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11111 u32 offset, mask, save_val, val;
11113 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11114 mask = reg_tbl[i].mask;
11116 save_val = REG_RD(bp, offset);
11118 REG_WR(bp, offset, wr_val);
11119 val = REG_RD(bp, offset);
11121 /* Restore the original register's value */
11122 REG_WR(bp, offset, save_val);
11124 /* verify value is as expected */
11125 if ((val & mask) != (wr_val & mask)) {
11126 DP(NETIF_MSG_PROBE,
11127 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11128 offset, val, wr_val, mask);
11129 goto test_reg_exit;
11140 static int bnx2x_test_memory(struct bnx2x *bp)
11142 int i, j, rc = -ENODEV;
11144 static const struct {
11148 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11149 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11150 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11151 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11152 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11153 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11154 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11158 static const struct {
11164 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11165 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11166 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11167 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11168 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11169 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11171 { NULL, 0xffffffff, 0, 0 }
11174 if (!netif_running(bp->dev))
11177 /* Go through all the memories */
11178 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11179 for (j = 0; j < mem_tbl[i].size; j++)
11180 REG_RD(bp, mem_tbl[i].offset + j*4);
11182 /* Check the parity status */
11183 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11184 val = REG_RD(bp, prty_tbl[i].offset);
11185 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11186 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11188 "%s is 0x%x\n", prty_tbl[i].name, val);
11189 goto test_mem_exit;
11199 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11204 while (bnx2x_link_test(bp) && cnt--)
11208 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11210 unsigned int pkt_size, num_pkts, i;
11211 struct sk_buff *skb;
11212 unsigned char *packet;
11213 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11214 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11215 u16 tx_start_idx, tx_idx;
11216 u16 rx_start_idx, rx_idx;
11217 u16 pkt_prod, bd_prod;
11218 struct sw_tx_bd *tx_buf;
11219 struct eth_tx_start_bd *tx_start_bd;
11220 struct eth_tx_parse_bd *pbd = NULL;
11221 dma_addr_t mapping;
11222 union eth_rx_cqe *cqe;
11224 struct sw_rx_bd *rx_buf;
11228 /* check the loopback mode */
11229 switch (loopback_mode) {
11230 case BNX2X_PHY_LOOPBACK:
11231 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11234 case BNX2X_MAC_LOOPBACK:
11235 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11236 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11242 /* prepare the loopback packet */
11243 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11244 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11245 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11248 goto test_loopback_exit;
11250 packet = skb_put(skb, pkt_size);
11251 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11252 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11253 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11254 for (i = ETH_HLEN; i < pkt_size; i++)
11255 packet[i] = (unsigned char) (i & 0xff);
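/* Frame layout: destination MAC is our own address so the looped-back
 * frame is accepted, source MAC is all zeros, 0x77 filler pads the rest
 * of the Ethernet header, and the payload is the repeating 0x00..0xff
 * byte pattern that the receive side verifies below. */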
11257 /* send the loopback packet */
11259 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11260 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11262 pkt_prod = fp_tx->tx_pkt_prod++;
11263 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11264 tx_buf->first_bd = fp_tx->tx_bd_prod;
11268 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11269 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11270 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11271 skb_headlen(skb), DMA_TO_DEVICE);
11272 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11273 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11274 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11275 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11276 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11277 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11278 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11279 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11281 /* turn on parsing and get a BD */
11282 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11283 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11285 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11289 fp_tx->tx_db.data.prod += 2;
11291 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11296 fp_tx->tx_bd_prod += 2; /* start + pbd */
11300 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11301 if (tx_idx != tx_start_idx + num_pkts)
11302 goto test_loopback_exit;
11304 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11305 if (rx_idx != rx_start_idx + num_pkts)
11306 goto test_loopback_exit;
11308 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11309 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11310 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11311 goto test_loopback_rx_exit;
11313 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11314 if (len != pkt_size)
11315 goto test_loopback_rx_exit;
11317 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11319 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11320 for (i = ETH_HLEN; i < pkt_size; i++)
11321 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11322 goto test_loopback_rx_exit;
11326 test_loopback_rx_exit:
11328 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11329 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11330 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11331 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11333 /* Update producers */
11334 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11335 fp_rx->rx_sge_prod);
11337 test_loopback_exit:
11338 bp->link_params.loopback_mode = LOOPBACK_NONE;
11343 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11350 if (!netif_running(bp->dev))
11351 return BNX2X_LOOPBACK_FAILED;
11353 bnx2x_netif_stop(bp, 1);
11354 bnx2x_acquire_phy_lock(bp);
11356 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11358 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11359 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11362 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11364 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11365 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11368 bnx2x_release_phy_lock(bp);
11369 bnx2x_netif_start(bp);
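/* Each NVRAM region checked below is stored together with its own
 * little-endian CRC32.  CRC32 has the property that the CRC computed
 * over data plus its appended CRC is a constant residual regardless of
 * the data, so a region is intact iff ether_crc_le() over the whole
 * region equals this magic value. */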
11374 #define CRC32_RESIDUAL 0xdebb20e3
11376 static int bnx2x_test_nvram(struct bnx2x *bp)
11378 static const struct {
11382 { 0, 0x14 }, /* bootstrap */
11383 { 0x14, 0xec }, /* dir */
11384 { 0x100, 0x350 }, /* manuf_info */
11385 { 0x450, 0xf0 }, /* feature_info */
11386 { 0x640, 0x64 }, /* upgrade_key_info */
11388 { 0x708, 0x70 }, /* manuf_key_info */
11392 __be32 buf[0x350 / 4];
11393 u8 *data = (u8 *)buf;
11400 rc = bnx2x_nvram_read(bp, 0, data, 4);
11402 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11403 goto test_nvram_exit;
11406 magic = be32_to_cpu(buf[0]);
11407 if (magic != 0x669955aa) {
11408 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11410 goto test_nvram_exit;
11413 for (i = 0; nvram_tbl[i].size; i++) {
11415 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11416 nvram_tbl[i].size);
11418 DP(NETIF_MSG_PROBE,
11419 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11420 goto test_nvram_exit;
11423 crc = ether_crc_le(nvram_tbl[i].size, data);
11424 if (crc != CRC32_RESIDUAL) {
11425 DP(NETIF_MSG_PROBE,
11426 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11428 goto test_nvram_exit;
11436 static int bnx2x_test_intr(struct bnx2x *bp)
11438 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11441 if (!netif_running(bp->dev))
11444 config->hdr.length = 0;
11445 if (CHIP_IS_E1(bp))
11446 /* use last unicast entries */
11447 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11449 config->hdr.offset = BP_FUNC(bp);
11450 config->hdr.client_id = bp->fp->cl_id;
11451 config->hdr.reserved1 = 0;
11453 bp->set_mac_pending++;
11455 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11456 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11457 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
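/* The SET_MAC ramrod completes through the slowpath interrupt path,
 * which clears set_mac_pending; poll for up to ~100 ms so a dead
 * interrupt line shows up as a test failure rather than a hang. */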
11459 for (i = 0; i < 10; i++) {
11460 if (!bp->set_mac_pending)
11463 msleep_interruptible(10);
11472 static void bnx2x_self_test(struct net_device *dev,
11473 struct ethtool_test *etest, u64 *buf)
11475 struct bnx2x *bp = netdev_priv(dev);
11477 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11478 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11479 etest->flags |= ETH_TEST_FL_FAILED;
11483 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11485 if (!netif_running(dev))
11488 /* offline tests are not supported in MF mode */
11490 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11492 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11493 int port = BP_PORT(bp);
11497 /* save current value of input enable for TX port IF */
11498 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11499 /* disable input for TX port IF */
11500 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11502 link_up = (bnx2x_link_test(bp) == 0);
11503 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11504 bnx2x_nic_load(bp, LOAD_DIAG);
11505 /* wait until link state is restored */
11506 bnx2x_wait_for_link(bp, link_up);
11508 if (bnx2x_test_registers(bp) != 0) {
11510 etest->flags |= ETH_TEST_FL_FAILED;
11512 if (bnx2x_test_memory(bp) != 0) {
11514 etest->flags |= ETH_TEST_FL_FAILED;
11516 buf[2] = bnx2x_test_loopback(bp, link_up);
11518 etest->flags |= ETH_TEST_FL_FAILED;
11520 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11522 /* restore input for TX port IF */
11523 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11525 bnx2x_nic_load(bp, LOAD_NORMAL);
11526 /* wait until link state is restored */
11527 bnx2x_wait_for_link(bp, link_up);
11529 if (bnx2x_test_nvram(bp) != 0) {
11531 etest->flags |= ETH_TEST_FL_FAILED;
11533 if (bnx2x_test_intr(bp) != 0) {
11535 etest->flags |= ETH_TEST_FL_FAILED;
11538 if (bnx2x_link_test(bp) != 0) {
11540 etest->flags |= ETH_TEST_FL_FAILED;
11543 #ifdef BNX2X_EXTRA_DEBUG
11544 bnx2x_panic_dump(bp);
11548 static const struct {
11551 u8 string[ETH_GSTRING_LEN];
11552 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11553 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11554 { Q_STATS_OFFSET32(error_bytes_received_hi),
11555 8, "[%d]: rx_error_bytes" },
11556 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11557 8, "[%d]: rx_ucast_packets" },
11558 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11559 8, "[%d]: rx_mcast_packets" },
11560 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11561 8, "[%d]: rx_bcast_packets" },
11562 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11563 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11564 4, "[%d]: rx_phy_ip_err_discards"},
11565 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11566 4, "[%d]: rx_skb_alloc_discard" },
11567 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11569 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11570 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11571 8, "[%d]: tx_ucast_packets" },
11572 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11573 8, "[%d]: tx_mcast_packets" },
11574 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11575 8, "[%d]: tx_bcast_packets" }
11578 static const struct {
11582 #define STATS_FLAGS_PORT 1
11583 #define STATS_FLAGS_FUNC 2
11584 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11585 u8 string[ETH_GSTRING_LEN];
11586 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11587 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11588 8, STATS_FLAGS_BOTH, "rx_bytes" },
11589 { STATS_OFFSET32(error_bytes_received_hi),
11590 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11591 { STATS_OFFSET32(total_unicast_packets_received_hi),
11592 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11593 { STATS_OFFSET32(total_multicast_packets_received_hi),
11594 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11595 { STATS_OFFSET32(total_broadcast_packets_received_hi),
11596 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11597 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11598 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11599 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11600 8, STATS_FLAGS_PORT, "rx_align_errors" },
11601 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11602 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11603 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11604 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11605 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11606 8, STATS_FLAGS_PORT, "rx_fragments" },
11607 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11608 8, STATS_FLAGS_PORT, "rx_jabbers" },
11609 { STATS_OFFSET32(no_buff_discard_hi),
11610 8, STATS_FLAGS_BOTH, "rx_discards" },
11611 { STATS_OFFSET32(mac_filter_discard),
11612 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11613 { STATS_OFFSET32(xxoverflow_discard),
11614 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11615 { STATS_OFFSET32(brb_drop_hi),
11616 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11617 { STATS_OFFSET32(brb_truncate_hi),
11618 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11619 { STATS_OFFSET32(pause_frames_received_hi),
11620 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11621 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11622 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11623 { STATS_OFFSET32(nig_timer_max),
11624 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11625 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11626 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11627 { STATS_OFFSET32(rx_skb_alloc_failed),
11628 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11629 { STATS_OFFSET32(hw_csum_err),
11630 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11632 { STATS_OFFSET32(total_bytes_transmitted_hi),
11633 8, STATS_FLAGS_BOTH, "tx_bytes" },
11634 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11635 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11636 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11637 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11638 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11639 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11640 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11641 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11642 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11643 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11644 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11645 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11646 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11647 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11648 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11649 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11650 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11651 8, STATS_FLAGS_PORT, "tx_deferred" },
11652 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11653 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11654 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11655 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11656 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11657 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11658 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11659 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11660 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11661 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11662 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11663 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11664 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11665 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11666 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11667 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11668 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11669 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11670 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11671 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11672 { STATS_OFFSET32(pause_frames_sent_hi),
11673 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11676 #define IS_PORT_STAT(i) \
11677 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11678 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11679 #define IS_E1HMF_MODE_STAT(bp) \
11680 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
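/* In E1H multi-function mode the MAC-level counters are per-port and the
 * port is shared between functions, so port statistics are hidden from
 * ethtool (only per-function counters are reported) unless the
 * BNX2X_MSG_STATS debug level is enabled. */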
11682 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11684 struct bnx2x *bp = netdev_priv(dev);
11687 switch (stringset) {
11689 if (is_multi(bp)) {
11690 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11691 if (!IS_E1HMF_MODE_STAT(bp))
11692 num_stats += BNX2X_NUM_STATS;
11694 if (IS_E1HMF_MODE_STAT(bp)) {
11696 for (i = 0; i < BNX2X_NUM_STATS; i++)
11697 if (IS_FUNC_STAT(i))
11700 num_stats = BNX2X_NUM_STATS;
11705 return BNX2X_NUM_TESTS;
11712 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11714 struct bnx2x *bp = netdev_priv(dev);
11717 switch (stringset) {
11719 if (is_multi(bp)) {
11721 for_each_queue(bp, i) {
11722 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11723 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11724 bnx2x_q_stats_arr[j].string, i);
11725 k += BNX2X_NUM_Q_STATS;
11727 if (IS_E1HMF_MODE_STAT(bp))
11729 for (j = 0; j < BNX2X_NUM_STATS; j++)
11730 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11731 bnx2x_stats_arr[j].string);
11733 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11734 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11736 strcpy(buf + j*ETH_GSTRING_LEN,
11737 bnx2x_stats_arr[i].string);
11744 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11749 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11750 struct ethtool_stats *stats, u64 *buf)
11752 struct bnx2x *bp = netdev_priv(dev);
11753 u32 *hw_stats, *offset;
11756 if (is_multi(bp)) {
11758 for_each_queue(bp, i) {
11759 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11760 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11761 if (bnx2x_q_stats_arr[j].size == 0) {
11762 /* skip this counter */
11766 offset = (hw_stats +
11767 bnx2x_q_stats_arr[j].offset);
11768 if (bnx2x_q_stats_arr[j].size == 4) {
11769 /* 4-byte counter */
11770 buf[k + j] = (u64) *offset;
11773 /* 8-byte counter */
11774 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11776 k += BNX2X_NUM_Q_STATS;
11778 if (IS_E1HMF_MODE_STAT(bp))
11780 hw_stats = (u32 *)&bp->eth_stats;
11781 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11782 if (bnx2x_stats_arr[j].size == 0) {
11783 /* skip this counter */
11787 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11788 if (bnx2x_stats_arr[j].size == 4) {
11789 /* 4-byte counter */
11790 buf[k + j] = (u64) *offset;
11793 /* 8-byte counter */
11794 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11797 hw_stats = (u32 *)&bp->eth_stats;
11798 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11799 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11801 if (bnx2x_stats_arr[i].size == 0) {
11802 /* skip this counter */
11807 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11808 if (bnx2x_stats_arr[i].size == 4) {
11809 /* 4-byte counter */
11810 buf[j] = (u64) *offset;
11814 /* 8-byte counter */
11815 buf[j] = HILO_U64(*offset, *(offset + 1));
11821 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11823 struct bnx2x *bp = netdev_priv(dev);
11826 if (!netif_running(dev))
11835 for (i = 0; i < (data * 2); i++) {
11837 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11840 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11842 msleep_interruptible(500);
11843 if (signal_pending(current))
11847 if (bp->link_vars.link_up)
11848 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11849 bp->link_vars.line_speed);
11854 static const struct ethtool_ops bnx2x_ethtool_ops = {
11855 .get_settings = bnx2x_get_settings,
11856 .set_settings = bnx2x_set_settings,
11857 .get_drvinfo = bnx2x_get_drvinfo,
11858 .get_regs_len = bnx2x_get_regs_len,
11859 .get_regs = bnx2x_get_regs,
11860 .get_wol = bnx2x_get_wol,
11861 .set_wol = bnx2x_set_wol,
11862 .get_msglevel = bnx2x_get_msglevel,
11863 .set_msglevel = bnx2x_set_msglevel,
11864 .nway_reset = bnx2x_nway_reset,
11865 .get_link = bnx2x_get_link,
11866 .get_eeprom_len = bnx2x_get_eeprom_len,
11867 .get_eeprom = bnx2x_get_eeprom,
11868 .set_eeprom = bnx2x_set_eeprom,
11869 .get_coalesce = bnx2x_get_coalesce,
11870 .set_coalesce = bnx2x_set_coalesce,
11871 .get_ringparam = bnx2x_get_ringparam,
11872 .set_ringparam = bnx2x_set_ringparam,
11873 .get_pauseparam = bnx2x_get_pauseparam,
11874 .set_pauseparam = bnx2x_set_pauseparam,
11875 .get_rx_csum = bnx2x_get_rx_csum,
11876 .set_rx_csum = bnx2x_set_rx_csum,
11877 .get_tx_csum = ethtool_op_get_tx_csum,
11878 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11879 .set_flags = bnx2x_set_flags,
11880 .get_flags = ethtool_op_get_flags,
11881 .get_sg = ethtool_op_get_sg,
11882 .set_sg = ethtool_op_set_sg,
11883 .get_tso = ethtool_op_get_tso,
11884 .set_tso = bnx2x_set_tso,
11885 .self_test = bnx2x_self_test,
11886 .get_sset_count = bnx2x_get_sset_count,
11887 .get_strings = bnx2x_get_strings,
11888 .phys_id = bnx2x_phys_id,
11889 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11892 /* end of ethtool_ops */
11894 /****************************************************************************
11895 * General service functions
11896 ****************************************************************************/
11898 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11902 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11906 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11907 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11908 PCI_PM_CTRL_PME_STATUS));
11910 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11911 /* delay required during transition out of D3hot */
11916 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11920 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11922 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11925 /* No more memory access after this point until
11926 * device is brought back to D0.
11927 */
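/* The last descriptor of every RCQ page is a "next page" link rather
 * than a real completion entry, so a status-block index that lands on
 * such a slot is advanced past it before being compared with our
 * consumer. */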
11936 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11940 /* Tell compiler that status block fields can change */
11942 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11943 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11944 rx_cons_sb++;
11945 return (fp->rx_comp_cons != rx_cons_sb);
11948 /*
11949 * net_device service functions
11950 */
11952 static int bnx2x_poll(struct napi_struct *napi, int budget)
11955 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11957 struct bnx2x *bp = fp->bp;
11960 #ifdef BNX2X_STOP_ON_ERROR
11961 if (unlikely(bp->panic)) {
11962 napi_complete(napi);
11967 if (bnx2x_has_tx_work(fp))
11970 if (bnx2x_has_rx_work(fp)) {
11971 work_done += bnx2x_rx_int(fp, budget - work_done);
11973 /* must not complete if we consumed full budget */
11974 if (work_done >= budget)
11978 /* Fall out from the NAPI loop if needed */
11979 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11980 bnx2x_update_fpsb_idx(fp);
11981 /* bnx2x_has_rx_work() reads the status block, thus we need
11982 * to ensure that status block indices have been actually read
11983 * (bnx2x_update_fpsb_idx) prior to this check
11984 * (bnx2x_has_rx_work) so that we won't write the "newer"
11985 * value of the status block to IGU (if there was a DMA right
11986 * after bnx2x_has_rx_work and if there is no rmb, the memory
11987 * reading (bnx2x_update_fpsb_idx) may be postponed to right
11988 * before bnx2x_ack_sb). In this case there will never be
11989 * another interrupt until there is another update of the
11990 * status block, while there is still unhandled work.
11991 */
11992 rmb();
11994 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11995 napi_complete(napi);
11996 /* Re-enable interrupts */
11997 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11998 le16_to_cpu(fp->fp_c_idx),
12000 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12001 le16_to_cpu(fp->fp_u_idx),
12002 IGU_INT_ENABLE, 1);
12012 /* We split the first BD into header and data BDs
12013 * to ease the pain of our fellow microcode engineers;
12014 * we use one mapping for both BDs.
12015 * So far this has only been observed to happen
12016 * in Other Operating Systems(TM)
12017 */
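/* The split reuses the original DMA mapping: the start BD is trimmed to
 * hlen bytes and a new regular BD is pointed hlen bytes further into the
 * same buffer, which is why only the first BD carries an individual
 * unmap entry. */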
12018 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12019 struct bnx2x_fastpath *fp,
12020 struct sw_tx_bd *tx_buf,
12021 struct eth_tx_start_bd **tx_bd, u16 hlen,
12022 u16 bd_prod, int nbd)
12024 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12025 struct eth_tx_bd *d_tx_bd;
12026 dma_addr_t mapping;
12027 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12029 /* first fix first BD */
12030 h_tx_bd->nbd = cpu_to_le16(nbd);
12031 h_tx_bd->nbytes = cpu_to_le16(hlen);
12033 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12034 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12035 h_tx_bd->addr_lo, h_tx_bd->nbd);
12037 /* now get a new data BD
12038 * (after the pbd) and fill it */
12039 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12040 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12042 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12043 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12045 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12046 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12047 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12049 /* this marks the BD as one that has no individual mapping */
12050 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12052 DP(NETIF_MSG_TX_QUEUED,
12053 "TSO split data size is %d (%x:%x)\n",
12054 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12057 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
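/* 'fix' is the signed distance between the point where the HW actually
 * started checksumming and the transport header: fold the extra leading
 * bytes out of the sum (fix > 0) or fold the wrongly skipped bytes back
 * in (fix < 0), then byte-swap into the order the parsing BD expects. */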
12062 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12065 csum = (u16) ~csum_fold(csum_sub(csum,
12066 csum_partial(t_header - fix, fix, 0)));
12069 csum = (u16) ~csum_fold(csum_add(csum,
12070 csum_partial(t_header, -fix, 0)));
12072 return swab16(csum);
12075 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12079 if (skb->ip_summed != CHECKSUM_PARTIAL)
12083 if (skb->protocol == htons(ETH_P_IPV6)) {
12085 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12086 rc |= XMIT_CSUM_TCP;
12090 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12091 rc |= XMIT_CSUM_TCP;
12095 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12096 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12098 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12099 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12104 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12105 /* check if the packet requires linearization (i.e. it is too fragmented);
12106 no need to check fragmentation if page size > 8K (there will be no
12107 violation of FW restrictions) */
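/* The FW fetches at most MAX_FETCH_BD BDs of a packet at a time.  For an
 * LSO packet this means that every window of (MAX_FETCH_BD - 3)
 * consecutive frags must supply at least one full MSS of payload; the
 * sliding-window scan below requests linearization whenever a window
 * falls short. */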
12108 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12113 int first_bd_sz = 0;
12115 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12116 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12118 if (xmit_type & XMIT_GSO) {
12119 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12120 /* Check if LSO packet needs to be copied:
12121 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12122 int wnd_size = MAX_FETCH_BD - 3;
12123 /* Number of windows to check */
12124 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12129 /* Headers length */
12130 hlen = (int)(skb_transport_header(skb) - skb->data) +
12133 /* Amount of data (w/o headers) on the linear part of the SKB */
12134 first_bd_sz = skb_headlen(skb) - hlen;
12136 wnd_sum = first_bd_sz;
12138 /* Calculate the first sum - it's special */
12139 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12141 skb_shinfo(skb)->frags[frag_idx].size;
12143 /* If there was data on linear skb data - check it */
12144 if (first_bd_sz > 0) {
12145 if (unlikely(wnd_sum < lso_mss)) {
12150 wnd_sum -= first_bd_sz;
12153 /* Others are easier: run through the frag list and
12154 check all windows */
12155 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12157 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12159 if (unlikely(wnd_sum < lso_mss)) {
12164 skb_shinfo(skb)->frags[wnd_idx].size;
12167 /* in the non-LSO case a too-fragmented packet must always be linearized */
12174 if (unlikely(to_copy))
12175 DP(NETIF_MSG_TX_QUEUED,
12176 "Linearization IS REQUIRED for %s packet. "
12177 "num_frags %d hlen %d first_bd_sz %d\n",
12178 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12179 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12185 /* called with netif_tx_lock
12186 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12187 * netif_wake_queue()
12188 */
12189 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12191 struct bnx2x *bp = netdev_priv(dev);
12192 struct bnx2x_fastpath *fp;
12193 struct netdev_queue *txq;
12194 struct sw_tx_bd *tx_buf;
12195 struct eth_tx_start_bd *tx_start_bd;
12196 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12197 struct eth_tx_parse_bd *pbd = NULL;
12198 u16 pkt_prod, bd_prod;
12200 dma_addr_t mapping;
12201 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12204 __le16 pkt_size = 0;
12205 struct ethhdr *eth;
12206 u8 mac_type = UNICAST_ADDRESS;
12208 #ifdef BNX2X_STOP_ON_ERROR
12209 if (unlikely(bp->panic))
12210 return NETDEV_TX_BUSY;
12213 fp_index = skb_get_queue_mapping(skb);
12214 txq = netdev_get_tx_queue(dev, fp_index);
12216 fp = &bp->fp[fp_index];
12218 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12219 fp->eth_q_stats.driver_xoff++;
12220 netif_tx_stop_queue(txq);
12221 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12222 return NETDEV_TX_BUSY;
12225 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12226 " gso type %x xmit_type %x\n",
12227 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12228 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12230 eth = (struct ethhdr *)skb->data;
12232 /* set flag according to packet type (UNICAST_ADDRESS is default) */
12233 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12234 if (is_broadcast_ether_addr(eth->h_dest))
12235 mac_type = BROADCAST_ADDRESS;
12237 mac_type = MULTICAST_ADDRESS;
12240 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12241 /* First, check if we need to linearize the skb (due to FW
12242 restrictions). No need to check fragmentation if page size > 8K
12243 (there will be no violation of FW restrictions) */
12244 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12245 /* Statistics of linearization */
12247 if (skb_linearize(skb) != 0) {
12248 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12249 "silently dropping this SKB\n");
12250 dev_kfree_skb_any(skb);
12251 return NETDEV_TX_OK;
12256 /*
12257 Please read carefully. First we use one BD which we mark as start,
12258 then we have a parsing info BD (used for TSO or xsum),
12259 and only then we have the rest of the TSO BDs.
12260 (don't forget to mark the last one as last,
12261 and to unmap only AFTER you write to the BD ...)
12262 And above all, all PBD sizes are in words - NOT DWORDS!
12263 */
12265 pkt_prod = fp->tx_pkt_prod++;
12266 bd_prod = TX_BD(fp->tx_bd_prod);
12268 /* get a tx_buf and first BD */
12269 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12270 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12272 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12273 tx_start_bd->general_data = (mac_type <<
12274 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12276 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12278 /* remember the first BD of the packet */
12279 tx_buf->first_bd = fp->tx_bd_prod;
12283 DP(NETIF_MSG_TX_QUEUED,
12284 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12285 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12288 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12289 (bp->flags & HW_VLAN_TX_FLAG)) {
12290 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12291 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12294 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12296 /* turn on parsing and get a BD */
12297 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12298 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12300 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12302 if (xmit_type & XMIT_CSUM) {
12303 hlen = (skb_network_header(skb) - skb->data) / 2;
12305 /* for now NS flag is not used in Linux */
12307 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12308 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12310 pbd->ip_hlen = (skb_transport_header(skb) -
12311 skb_network_header(skb)) / 2;
12313 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12315 pbd->total_hlen = cpu_to_le16(hlen);
12318 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12320 if (xmit_type & XMIT_CSUM_V4)
12321 tx_start_bd->bd_flags.as_bitfield |=
12322 ETH_TX_BD_FLAGS_IP_CSUM;
12324 tx_start_bd->bd_flags.as_bitfield |=
12325 ETH_TX_BD_FLAGS_IPV6;
12327 if (xmit_type & XMIT_CSUM_TCP) {
12328 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12331 s8 fix = SKB_CS_OFF(skb); /* signed! */
12333 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12335 DP(NETIF_MSG_TX_QUEUED,
12336 "hlen %d fix %d csum before fix %x\n",
12337 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12339 /* HW bug: fixup the CSUM */
12340 pbd->tcp_pseudo_csum =
12341 bnx2x_csum_fix(skb_transport_header(skb),
12344 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12345 pbd->tcp_pseudo_csum);
12349 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12350 skb_headlen(skb), DMA_TO_DEVICE);
12352 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12353 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12354 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12355 tx_start_bd->nbd = cpu_to_le16(nbd);
12356 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12357 pkt_size = tx_start_bd->nbytes;
12359 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12360 " nbytes %d flags %x vlan %x\n",
12361 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12362 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12363 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12365 if (xmit_type & XMIT_GSO) {
12367 DP(NETIF_MSG_TX_QUEUED,
12368 "TSO packet len %d hlen %d total len %d tso size %d\n",
12369 skb->len, hlen, skb_headlen(skb),
12370 skb_shinfo(skb)->gso_size);
12372 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12374 if (unlikely(skb_headlen(skb) > hlen))
12375 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12376 hlen, bd_prod, ++nbd);
12378 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12379 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12380 pbd->tcp_flags = pbd_tcp_flags(skb);
12382 if (xmit_type & XMIT_GSO_V4) {
12383 pbd->ip_id = swab16(ip_hdr(skb)->id);
12384 pbd->tcp_pseudo_csum =
12385 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12386 ip_hdr(skb)->daddr,
12387 0, IPPROTO_TCP, 0));
12390 pbd->tcp_pseudo_csum =
12391 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12392 &ipv6_hdr(skb)->daddr,
12393 0, IPPROTO_TCP, 0));
12395 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12397 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12399 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12400 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12402 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12403 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12404 if (total_pkt_bd == NULL)
12405 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12407 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12409 frag->size, DMA_TO_DEVICE);
12411 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12412 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12413 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12414 le16_add_cpu(&pkt_size, frag->size);
12416 DP(NETIF_MSG_TX_QUEUED,
12417 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12418 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12419 le16_to_cpu(tx_data_bd->nbytes));
12422 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12424 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12426 /* now send a tx doorbell, counting the next BD
12427 * if the packet contains or ends with it
12428 */
12429 if (TX_BD_POFF(bd_prod) < nbd)
12432 if (total_pkt_bd != NULL)
12433 total_pkt_bd->total_pkt_bytes = pkt_size;
12436 DP(NETIF_MSG_TX_QUEUED,
12437 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12438 " tcp_flags %x xsum %x seq %u hlen %u\n",
12439 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12440 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12441 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12443 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12445 /*
12446 * Make sure that the BD data is updated before updating the producer
12447 * since FW might read the BD right after the producer is updated.
12448 * This is only applicable for weak-ordered memory model archs such
12449 * as IA-64. The following barrier is also mandatory since the FW
12450 * assumes all packets always have BDs.
12451 */
12453 wmb();
12454 fp->tx_db.data.prod += nbd;
12456 DOORBELL(bp, fp->index, fp->tx_db.raw);
12460 fp->tx_bd_prod += nbd;
12462 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12463 netif_tx_stop_queue(txq);
12465 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
12466 * ordering of the set_bit() in netif_tx_stop_queue() and the read of
12467 * fp->tx_bd_cons */
12468 smp_mb();
12470 fp->eth_q_stats.driver_xoff++;
12471 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12472 netif_tx_wake_queue(txq);
12476 return NETDEV_TX_OK;
12479 /* called with rtnl_lock */
12480 static int bnx2x_open(struct net_device *dev)
12482 struct bnx2x *bp = netdev_priv(dev);
12484 netif_carrier_off(dev);
12486 bnx2x_set_power_state(bp, PCI_D0);
12488 if (!bnx2x_reset_is_done(bp)) {
12490 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
12495 /* If it's the first function to load and "reset done"
12496 * is still not cleared, it may mean that recovery is
12497 * still in progress. We don't check the attention state
12498 * here because it may have already been cleared by a
12499 * "common" reset, but we shall proceed with "process kill" anyway.
12500 */
12501 if ((bnx2x_get_load_cnt(bp) == 0) &&
12502 bnx2x_trylock_hw_lock(bp,
12503 HW_LOCK_RESOURCE_RESERVED_08) &&
12504 (!bnx2x_leader_reset(bp))) {
12505 DP(NETIF_MSG_HW, "Recovered in open\n");
12509 bnx2x_set_power_state(bp, PCI_D3hot);
12511 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12512 " completed yet. Try again later. If you still see this"
12513 " message after a few retries then a power cycle is"
12514 " required.\n", bp->dev->name);
12520 bp->recovery_state = BNX2X_RECOVERY_DONE;
12522 return bnx2x_nic_load(bp, LOAD_OPEN);
12525 /* called with rtnl_lock */
12526 static int bnx2x_close(struct net_device *dev)
12528 struct bnx2x *bp = netdev_priv(dev);
12530 /* Unload the driver, release IRQs */
12531 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12532 if (atomic_read(&bp->pdev->enable_cnt) == 1)
12533 if (!CHIP_REV_IS_SLOW(bp))
12534 bnx2x_set_power_state(bp, PCI_D3hot);
12539 /* called with netif_tx_lock from dev_mcast.c */
12540 static void bnx2x_set_rx_mode(struct net_device *dev)
12542 struct bnx2x *bp = netdev_priv(dev);
12543 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12544 int port = BP_PORT(bp);
12546 if (bp->state != BNX2X_STATE_OPEN) {
12547 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12551 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12553 if (dev->flags & IFF_PROMISC)
12554 rx_mode = BNX2X_RX_MODE_PROMISC;
12556 else if ((dev->flags & IFF_ALLMULTI) ||
12557 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12559 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12561 else { /* some multicasts */
12562 if (CHIP_IS_E1(bp)) {
12563 int i, old, offset;
12564 struct netdev_hw_addr *ha;
12565 struct mac_configuration_cmd *config =
12566 bnx2x_sp(bp, mcast_config);
12569 netdev_for_each_mc_addr(ha, dev) {
12570 config->config_table[i].
12571 cam_entry.msb_mac_addr =
12572 swab16(*(u16 *)&ha->addr[0]);
12573 config->config_table[i].
12574 cam_entry.middle_mac_addr =
12575 swab16(*(u16 *)&ha->addr[2]);
12576 config->config_table[i].
12577 cam_entry.lsb_mac_addr =
12578 swab16(*(u16 *)&ha->addr[4]);
12579 config->config_table[i].cam_entry.flags =
12581 config->config_table[i].
12582 target_table_entry.flags = 0;
12583 config->config_table[i].target_table_entry.
12584 clients_bit_vector =
12585 cpu_to_le32(1 << BP_L_ID(bp));
12586 config->config_table[i].
12587 target_table_entry.vlan_id = 0;
12590 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12591 config->config_table[i].
12592 cam_entry.msb_mac_addr,
12593 config->config_table[i].
12594 cam_entry.middle_mac_addr,
12595 config->config_table[i].
12596 cam_entry.lsb_mac_addr);
12599 old = config->hdr.length;
12601 for (; i < old; i++) {
12602 if (CAM_IS_INVALID(config->
12603 config_table[i])) {
12604 /* already invalidated */
12608 CAM_INVALIDATE(config->
12613 if (CHIP_REV_IS_SLOW(bp))
12614 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12616 offset = BNX2X_MAX_MULTICAST*(1 + port);
12618 config->hdr.length = i;
12619 config->hdr.offset = offset;
12620 config->hdr.client_id = bp->fp->cl_id;
12621 config->hdr.reserved1 = 0;
12623 bp->set_mac_pending++;
12626 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12627 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12628 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12631 /* Accept one or more multicasts */
12632 struct netdev_hw_addr *ha;
12633 u32 mc_filter[MC_HASH_SIZE];
12634 u32 crc, bit, regidx;
12637 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
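/* Imperfect hash filtering: CRC32c each address and use the top byte of
 * the hash as a bit index into a 256-bit table spread over MC_HASH_SIZE
 * 32-bit registers.  A hash byte of 0x41 (65), for example, sets bit 1
 * of register 2. */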
12639 netdev_for_each_mc_addr(ha, dev) {
12640 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12643 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12644 bit = (crc >> 24) & 0xff;
12645 regidx = bit >> 5;
12646 bit &= 0x1f;
12647 mc_filter[regidx] |= (1 << bit);
12650 for (i = 0; i < MC_HASH_SIZE; i++)
12651 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12656 bp->rx_mode = rx_mode;
12657 bnx2x_set_storm_rx_mode(bp);
12660 /* called with rtnl_lock */
12661 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12663 struct sockaddr *addr = p;
12664 struct bnx2x *bp = netdev_priv(dev);
12666 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12669 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12670 if (netif_running(dev)) {
12671 if (CHIP_IS_E1(bp))
12672 bnx2x_set_eth_mac_addr_e1(bp, 1);
12674 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12680 /* called with rtnl_lock */
12681 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12682 int devad, u16 addr)
12684 struct bnx2x *bp = netdev_priv(netdev);
12687 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12689 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12690 prtad, devad, addr);
12692 if (prtad != bp->mdio.prtad) {
12693 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12694 prtad, bp->mdio.prtad);
12698 /* The HW expects different devad if CL22 is used */
12699 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12701 bnx2x_acquire_phy_lock(bp);
12702 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12703 devad, addr, &value);
12704 bnx2x_release_phy_lock(bp);
12705 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12712 /* called with rtnl_lock */
12713 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12714 u16 addr, u16 value)
12716 struct bnx2x *bp = netdev_priv(netdev);
12717 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12720 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12721 " value 0x%x\n", prtad, devad, addr, value);
12723 if (prtad != bp->mdio.prtad) {
12724 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12725 prtad, bp->mdio.prtad);
12729 /* The HW expects different devad if CL22 is used */
12730 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12732 bnx2x_acquire_phy_lock(bp);
12733 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12734 devad, addr, value);
12735 bnx2x_release_phy_lock(bp);
12739 /* called with rtnl_lock */
12740 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12742 struct bnx2x *bp = netdev_priv(dev);
12743 struct mii_ioctl_data *mdio = if_mii(ifr);
12745 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12746 mdio->phy_id, mdio->reg_num, mdio->val_in);
12748 if (!netif_running(dev))
12751 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12754 /* called with rtnl_lock */
12755 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12757 struct bnx2x *bp = netdev_priv(dev);
12760 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12761 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12765 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12766 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12769 /* This does not race with packet allocation
12770 * because the actual alloc size is
12771 * only updated as part of load
12772 */
12773 dev->mtu = new_mtu;
12775 if (netif_running(dev)) {
12776 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12777 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12783 static void bnx2x_tx_timeout(struct net_device *dev)
12785 struct bnx2x *bp = netdev_priv(dev);
12787 #ifdef BNX2X_STOP_ON_ERROR
12791 /* This allows the netif to be shutdown gracefully before resetting */
12792 schedule_delayed_work(&bp->reset_task, 0);
12796 /* called with rtnl_lock */
12797 static void bnx2x_vlan_rx_register(struct net_device *dev,
12798 struct vlan_group *vlgrp)
12800 struct bnx2x *bp = netdev_priv(dev);
12804 /* Set flags according to the required capabilities */
12805 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12807 if (dev->features & NETIF_F_HW_VLAN_TX)
12808 bp->flags |= HW_VLAN_TX_FLAG;
12810 if (dev->features & NETIF_F_HW_VLAN_RX)
12811 bp->flags |= HW_VLAN_RX_FLAG;
12813 if (netif_running(dev))
12814 bnx2x_set_client_config(bp);
12819 #ifdef CONFIG_NET_POLL_CONTROLLER
12820 static void poll_bnx2x(struct net_device *dev)
12822 struct bnx2x *bp = netdev_priv(dev);
12824 disable_irq(bp->pdev->irq);
12825 bnx2x_interrupt(bp->pdev->irq, dev);
12826 enable_irq(bp->pdev->irq);
12830 static const struct net_device_ops bnx2x_netdev_ops = {
12831 .ndo_open = bnx2x_open,
12832 .ndo_stop = bnx2x_close,
12833 .ndo_start_xmit = bnx2x_start_xmit,
12834 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12835 .ndo_set_mac_address = bnx2x_change_mac_addr,
12836 .ndo_validate_addr = eth_validate_addr,
12837 .ndo_do_ioctl = bnx2x_ioctl,
12838 .ndo_change_mtu = bnx2x_change_mtu,
12839 .ndo_tx_timeout = bnx2x_tx_timeout,
12841 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12843 #ifdef CONFIG_NET_POLL_CONTROLLER
12844 .ndo_poll_controller = poll_bnx2x,
12848 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12849 struct net_device *dev)
12854 SET_NETDEV_DEV(dev, &pdev->dev);
12855 bp = netdev_priv(dev);
12860 bp->func = PCI_FUNC(pdev->devfn);
12862 rc = pci_enable_device(pdev);
12864 dev_err(&bp->pdev->dev,
12865 "Cannot enable PCI device, aborting\n");
12869 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12870 dev_err(&bp->pdev->dev,
12871 "Cannot find PCI device base address, aborting\n");
12873 goto err_out_disable;
12876 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12877 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12878 " base address, aborting\n");
12880 goto err_out_disable;
12883 if (atomic_read(&pdev->enable_cnt) == 1) {
12884 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12886 dev_err(&bp->pdev->dev,
12887 "Cannot obtain PCI resources, aborting\n");
12888 goto err_out_disable;
12891 pci_set_master(pdev);
12892 pci_save_state(pdev);
12895 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12896 if (bp->pm_cap == 0) {
12897 dev_err(&bp->pdev->dev,
12898 "Cannot find power management capability, aborting\n");
12900 goto err_out_release;
12903 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12904 if (bp->pcie_cap == 0) {
12905 dev_err(&bp->pdev->dev,
12906 "Cannot find PCI Express capability, aborting\n");
12908 goto err_out_release;
12911 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12912 bp->flags |= USING_DAC_FLAG;
12913 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12914 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12915 " failed, aborting\n");
12917 goto err_out_release;
12920 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12921 dev_err(&bp->pdev->dev,
12922 "System does not support DMA, aborting\n");
12924 goto err_out_release;
12927 dev->mem_start = pci_resource_start(pdev, 0);
12928 dev->base_addr = dev->mem_start;
12929 dev->mem_end = pci_resource_end(pdev, 0);
12931 dev->irq = pdev->irq;
12933 bp->regview = pci_ioremap_bar(pdev, 0);
12934 if (!bp->regview) {
12935 dev_err(&bp->pdev->dev,
12936 "Cannot map register space, aborting\n");
12938 goto err_out_release;
12941 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12942 min_t(u64, BNX2X_DB_SIZE,
12943 pci_resource_len(pdev, 2)));
12944 if (!bp->doorbells) {
12945 dev_err(&bp->pdev->dev,
12946 "Cannot map doorbell space, aborting\n");
12948 goto err_out_unmap;
12951 bnx2x_set_power_state(bp, PCI_D0);
12953 /* clean indirect addresses */
12954 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12955 PCICFG_VENDOR_ID_OFFSET);
12956 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12957 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12958 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12959 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12961 /* Reset the load counter */
12962 bnx2x_clear_load_cnt(bp);
12964 dev->watchdog_timeo = TX_TIMEOUT;
12966 dev->netdev_ops = &bnx2x_netdev_ops;
12967 dev->ethtool_ops = &bnx2x_ethtool_ops;
12968 dev->features |= NETIF_F_SG;
12969 dev->features |= NETIF_F_HW_CSUM;
12970 if (bp->flags & USING_DAC_FLAG)
12971 dev->features |= NETIF_F_HIGHDMA;
12972 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12973 dev->features |= NETIF_F_TSO6;
12975 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
12976 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12978 dev->vlan_features |= NETIF_F_SG;
12979 dev->vlan_features |= NETIF_F_HW_CSUM;
12980 if (bp->flags & USING_DAC_FLAG)
12981 dev->vlan_features |= NETIF_F_HIGHDMA;
12982 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12983 dev->vlan_features |= NETIF_F_TSO6;
12986 /* get_port_hwinfo() will set prtad and mmds properly */
12987 bp->mdio.prtad = MDIO_PRTAD_NONE;
12989 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12990 bp->mdio.dev = dev;
12991 bp->mdio.mdio_read = bnx2x_mdio_read;
12992 bp->mdio.mdio_write = bnx2x_mdio_write;
12998 iounmap(bp->regview);
12999 bp->regview = NULL;
13001 if (bp->doorbells) {
13002 iounmap(bp->doorbells);
13003 bp->doorbells = NULL;
13007 if (atomic_read(&pdev->enable_cnt) == 1)
13008 pci_release_regions(pdev);
13011 pci_disable_device(pdev);
13012 pci_set_drvdata(pdev, NULL);
13018 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13019 int *width, int *speed)
13021 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13023 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13025 /* return value of 1=2.5GHz 2=5GHz */
13026 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13029 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13031 const struct firmware *firmware = bp->firmware;
13032 struct bnx2x_fw_file_hdr *fw_hdr;
13033 struct bnx2x_fw_file_section *sections;
13034 u32 offset, len, num_ops;
13039 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13042 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13043 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13045 /* Make sure none of the offsets and sizes make us read beyond
13046 * the end of the firmware data */
13047 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13048 offset = be32_to_cpu(sections[i].offset);
13049 len = be32_to_cpu(sections[i].len);
13050 if (offset + len > firmware->size) {
13051 dev_err(&bp->pdev->dev,
13052 "Section %d length is out of bounds\n", i);
13057 /* Likewise for the init_ops offsets */
13058 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13059 ops_offsets = (u16 *)(firmware->data + offset);
13060 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13062 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13063 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13064 dev_err(&bp->pdev->dev,
13065 "Section offset %d is out of bounds\n", i);
13070 /* Check FW version */
13071 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13072 fw_ver = firmware->data + offset;
13073 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13074 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13075 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13076 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13077 dev_err(&bp->pdev->dev,
13078 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13079 fw_ver[0], fw_ver[1], fw_ver[2],
13080 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13081 BCM_5710_FW_MINOR_VERSION,
13082 BCM_5710_FW_REVISION_VERSION,
13083 BCM_5710_FW_ENGINEERING_VERSION);
13090 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13092 const __be32 *source = (const __be32 *)_source;
13093 u32 *target = (u32 *)_target;
13096 for (i = 0; i < n/4; i++)
13097 target[i] = be32_to_cpu(source[i]);
13100 /*
13101 * Ops array is stored in the following format:
13102 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13103 */
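/* For example, the big-endian word pair {0x02abcdef, 0x00000010} decodes
 * to op = 0x02, offset = 0xabcdef, raw_data = 0x00000010. */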
13104 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13106 const __be32 *source = (const __be32 *)_source;
13107 struct raw_op *target = (struct raw_op *)_target;
13110 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13111 tmp = be32_to_cpu(source[j]);
13112 target[i].op = (tmp >> 24) & 0xff;
13113 target[i].offset = tmp & 0xffffff;
13114 target[i].raw_data = be32_to_cpu(source[j + 1]);
13118 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13120 const __be16 *source = (const __be16 *)_source;
13121 u16 *target = (u16 *)_target;
13124 for (i = 0; i < n/2; i++)
13125 target[i] = be16_to_cpu(source[i]);
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
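/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands roughly to:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data) {
 *		pr_err("Failed to allocate %d bytes for init_data\n", len);
 *		goto request_firmware_exit;
 *	}
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * i.e. each table is copied out of the firmware blob and byte-swapped
 * into host order in a single step.
 */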
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
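/*
 * Note on the error path above: the labels unwind in reverse order of
 * allocation, so a failure at any BNX2X_ALLOC_AND_SET step frees exactly
 * the arrays allocated before it. A minimal sketch of the same idiom:
 *
 *	a = kmalloc(...); if (!a) goto out;
 *	b = kmalloc(...); if (!b) goto free_a;
 *	return 0;
 *	free_a: kfree(a);
 *	out:    return -ENOMEM;
 */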
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, ",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();
	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();
	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
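/*
 * The 0xA0000-0xC0000 window checked above is the address range in which
 * a live MCP publishes its shared memory; a base of zero or outside that
 * window means the management CPU is not running, and the driver falls
 * back to NO_MCP_FLAG operation. For example, shmem_base == 0xAF900 is
 * accepted, while 0x00000 is not.
 */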
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
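/*
 * Recovery sequence, as driven by the PCI core: on a bus error it calls
 * .error_detected (the driver detaches the netdev and requests a reset),
 * then after the link has been reset it calls .slot_reset (the driver
 * re-enables the device), and finally .resume (the driver reloads the NIC
 * and reattaches). Traffic only restarts once .resume completes.
 */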
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
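/*
 * The cons/prod pointers used above and in bnx2x_cnic_sp_queue() implement
 * a simple ring over the page allocated in bnx2x_register_cnic():
 * cnic_kwq_last marks the final slot, and once that slot has been consumed
 * (or produced into) the pointer wraps back to cnic_kwq, so the kwq page
 * behaves as a fixed circular buffer rather than a linear one.
 */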
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
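/*
 * Why two variants: bnx2x_cnic_ctl_send() runs in process context and may
 * sleep, so it serializes against register/unregister with cnic_mutex.
 * bnx2x_cnic_ctl_send_bh() is called from bottom-half context where
 * sleeping is forbidden, so it relies on rcu_read_lock()/rcu_dereference()
 * paired with the rcu_assign_pointer() updates and the synchronize_rcu()
 * grace period in bnx2x_unregister_cnic() below.
 */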
/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
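/*
 * Hypothetical caller sketch (this code lives in the separate cnic module,
 * not here; 'done' and 'netdev' are placeholder names): having obtained
 * the cnic_eth_dev from bnx2x_cnic_probe(), the offload driver hands back
 * completions through the callback filled in below:
 *
 *	struct drv_ctl_info info = { .cmd = DRV_CTL_COMPLETION_CMD };
 *	info.data.comp.comp_count = done;
 *	cp->drv_ctl(netdev, &info);	// lands in bnx2x_drv_ctl() above
 */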
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
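/*
 * Hypothetical consumer sketch (belongs to the separate cnic module;
 * my_cnic_ops and my_data are placeholder names, not symbols from this
 * driver): the offload driver probes the ethdev and then registers its
 * ops through the callbacks filled in above:
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *	if (cp && !cp->drv_register_cnic(netdev, &my_cnic_ops, my_data))
 *		// cnic may now queue kwqes via cp->drv_submit_kwqes_16
 */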
#endif /* BCM_CNIC */