1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
60 #define DRV_MODULE_VERSION "1.52.53-1"
61 #define DRV_MODULE_RELDATE "2010/18/04"
62 #define BNX2X_BC_VER 0x040200
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
67 #define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT (5*HZ)
78 static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
108 static int dropless_fc;
109 module_param(dropless_fc, int, 0);
110 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
113 module_param(poll, int, 0);
114 MODULE_PARM_DESC(poll, " Use polling (for debug)");
116 static int mrrs = -1;
117 module_param(mrrs, int, 0);
118 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
121 module_param(debug, int, 0);
122 MODULE_PARM_DESC(debug, " Default debug msglevel");
124 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
126 static struct workqueue_struct *bnx2x_wq;
128 enum bnx2x_board_type {
134 /* indexed by board_type, above */
137 } board_info[] __devinitdata = {
138 { "Broadcom NetXtreme II BCM57710 XGb" },
139 { "Broadcom NetXtreme II BCM57711 XGb" },
140 { "Broadcom NetXtreme II BCM57711E XGb" }
144 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
151 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
153 /****************************************************************************
154 * General service functions
155 ****************************************************************************/
158 * locking is done by mcp
160 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165 PCICFG_VENDOR_ID_OFFSET);
168 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175 PCICFG_VENDOR_ID_OFFSET);
180 static const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
187 /* copy command into DMAE command memory and set DMAE command go */
188 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
194 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
198 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
201 REG_WR(bp, dmae_reg_go_c[idx], 1);
204 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
207 struct dmae_command dmae;
208 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
211 if (!bp->dmae_ready) {
212 u32 *data = bnx2x_sp(bp, wb_data[0]);
214 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
215 " using indirect\n", dst_addr, len32);
216 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
220 memset(&dmae, 0, sizeof(struct dmae_command));
222 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
226 DMAE_CMD_ENDIANITY_B_DW_SWAP |
228 DMAE_CMD_ENDIANITY_DW_SWAP |
230 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232 dmae.src_addr_lo = U64_LO(dma_addr);
233 dmae.src_addr_hi = U64_HI(dma_addr);
234 dmae.dst_addr_lo = dst_addr >> 2;
235 dmae.dst_addr_hi = 0;
237 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239 dmae.comp_val = DMAE_COMP_VAL;
241 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
243 "dst_addr [%x:%08x (%08x)]\n"
244 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
245 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
252 mutex_lock(&bp->dmae_mutex);
256 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
260 while (*wb_comp != DMAE_COMP_VAL) {
261 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
264 BNX2X_ERR("DMAE timeout!\n");
268 /* adjust delay for emulation/FPGA */
269 if (CHIP_REV_IS_SLOW(bp))
275 mutex_unlock(&bp->dmae_mutex);
278 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
280 struct dmae_command dmae;
281 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
284 if (!bp->dmae_ready) {
285 u32 *data = bnx2x_sp(bp, wb_data[0]);
288 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
289 " using indirect\n", src_addr, len32);
290 for (i = 0; i < len32; i++)
291 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
295 memset(&dmae, 0, sizeof(struct dmae_command));
297 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
301 DMAE_CMD_ENDIANITY_B_DW_SWAP |
303 DMAE_CMD_ENDIANITY_DW_SWAP |
305 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307 dmae.src_addr_lo = src_addr >> 2;
308 dmae.src_addr_hi = 0;
309 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
312 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314 dmae.comp_val = DMAE_COMP_VAL;
316 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
318 "dst_addr [%x:%08x (%08x)]\n"
319 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
320 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
324 mutex_lock(&bp->dmae_mutex);
326 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
329 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
333 while (*wb_comp != DMAE_COMP_VAL) {
336 BNX2X_ERR("DMAE timeout!\n");
340 /* adjust delay for emulation/FPGA */
341 if (CHIP_REV_IS_SLOW(bp))
346 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
350 mutex_unlock(&bp->dmae_mutex);
353 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
359 while (len > dmae_wr_max) {
360 bnx2x_write_dmae(bp, phys_addr + offset,
361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
369 /* used only for slowpath so not inlined */
370 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
380 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
384 REG_RD_DMAE(bp, reg, wb_data, 2);
386 return HILO_U64(wb_data[0], wb_data[1]);
390 static int bnx2x_mc_assert(struct bnx2x *bp)
394 u32 row0, row1, row2, row3;
397 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398 XSTORM_ASSERT_LIST_INDEX_OFFSET);
400 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
402 /* print the asserts */
403 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
405 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i));
407 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
414 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416 " 0x%08x 0x%08x 0x%08x\n",
417 i, row3, row2, row1, row0);
425 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426 TSTORM_ASSERT_LIST_INDEX_OFFSET);
428 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
430 /* print the asserts */
431 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
433 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i));
435 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
442 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444 " 0x%08x 0x%08x 0x%08x\n",
445 i, row3, row2, row1, row0);
453 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454 CSTORM_ASSERT_LIST_INDEX_OFFSET);
456 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
458 /* print the asserts */
459 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
461 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i));
463 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
470 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472 " 0x%08x 0x%08x 0x%08x\n",
473 i, row3, row2, row1, row0);
481 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482 USTORM_ASSERT_LIST_INDEX_OFFSET);
484 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
486 /* print the asserts */
487 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
489 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i));
491 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 4);
493 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 8);
495 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496 USTORM_ASSERT_LIST_OFFSET(i) + 12);
498 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500 " 0x%08x 0x%08x 0x%08x\n",
501 i, row3, row2, row1, row0);
511 static void bnx2x_fw_dump(struct bnx2x *bp)
519 BNX2X_ERR("NO MCP - can not dump\n");
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526 pr_err("begin fw dump (mark 0x%x)\n", mark);
529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536 for (word = 0; word < 8; word++)
537 data[word] = htonl(REG_RD(bp, offset + 4*word));
539 pr_cont("%s", (char *)data);
541 pr_err("end of fw dump\n");
544 static void bnx2x_panic_dump(struct bnx2x *bp)
549 bp->stats_state = STATS_STATE_DISABLED;
550 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
552 BNX2X_ERR("begin crash dump -----------------\n");
556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
558 " spq_prod_idx(0x%x)\n",
559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
563 for_each_queue(bp, i) {
564 struct bnx2x_fastpath *fp = &bp->fp[i];
566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
569 i, fp->rx_bd_prod, fp->rx_bd_cons,
570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574 fp->rx_sge_prod, fp->last_max_sge,
575 le16_to_cpu(fp->fp_u_idx),
576 fp->status_blk->u_status_block.status_block_index);
580 for_each_queue(bp, i) {
581 struct bnx2x_fastpath *fp = &bp->fp[i];
583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590 fp->status_blk->c_status_block.status_block_index,
591 fp->tx_db.data.prod);
596 for_each_queue(bp, i) {
597 struct bnx2x_fastpath *fp = &bp->fp[i];
599 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601 for (j = start; j != end; j = RX_BD(j + 1)) {
602 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
605 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
606 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
609 start = RX_SGE(fp->rx_sge_prod);
610 end = RX_SGE(fp->last_max_sge);
611 for (j = start; j != end; j = RX_SGE(j + 1)) {
612 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
615 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
616 i, j, rx_sge[1], rx_sge[0], sw_page->page);
619 start = RCQ_BD(fp->rx_comp_cons - 10);
620 end = RCQ_BD(fp->rx_comp_cons + 503);
621 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
624 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
630 for_each_queue(bp, i) {
631 struct bnx2x_fastpath *fp = &bp->fp[i];
633 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635 for (j = start; j != end; j = TX_BD(j + 1)) {
636 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
638 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639 i, j, sw_bd->skb, sw_bd->first_bd);
642 start = TX_BD(fp->tx_bd_cons - 10);
643 end = TX_BD(fp->tx_bd_cons + 254);
644 for (j = start; j != end; j = TX_BD(j + 1)) {
645 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
647 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
654 BNX2X_ERR("end crash dump -----------------\n");
657 static void bnx2x_int_enable(struct bnx2x *bp)
659 int port = BP_PORT(bp);
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661 u32 val = REG_RD(bp, addr);
662 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
666 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0);
668 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
671 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
676 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678 HC_CONFIG_0_REG_INT_LINE_EN_0 |
679 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
684 REG_WR(bp, addr, val);
686 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
689 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
690 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
692 REG_WR(bp, addr, val);
694 * Ensure that HC_CONFIG is written before leading/trailing edge config
699 if (CHIP_IS_E1H(bp)) {
700 /* init leading/trailing edge */
702 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
704 /* enable nig and gpio3 attention */
709 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
713 /* Make sure that interrupts are indeed enabled from here on */
717 static void bnx2x_int_disable(struct bnx2x *bp)
719 int port = BP_PORT(bp);
720 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721 u32 val = REG_RD(bp, addr);
723 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725 HC_CONFIG_0_REG_INT_LINE_EN_0 |
726 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
728 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
731 /* flush all outstanding writes */
734 REG_WR(bp, addr, val);
735 if (REG_RD(bp, addr) != val)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n");
739 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
744 /* disable interrupt handling */
745 atomic_inc(&bp->intr_sem);
746 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
749 /* prevent the HW from sending interrupts */
750 bnx2x_int_disable(bp);
752 /* make sure all ISRs are done */
754 synchronize_irq(bp->msix_table[0].vector);
759 for_each_queue(bp, i)
760 synchronize_irq(bp->msix_table[i + offset].vector);
762 synchronize_irq(bp->pdev->irq);
764 /* make sure sp_task is not running */
765 cancel_delayed_work(&bp->sp_task);
766 flush_workqueue(bnx2x_wq);
772 * General service functions
775 /* Return true if succeeded to acquire the lock */
776 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
809 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810 u8 storm, u16 index, u8 op, u8 update)
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
814 struct igu_ack_register igu_ack;
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
827 /* Make sure that ACK is written */
832 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
834 struct host_status_block *fpsb = fp->status_blk;
836 barrier(); /* status block is written to by the chip */
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
841 static u16 bnx2x_ack_int(struct bnx2x *bp)
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
855 * fast path service functions
858 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
860 /* Tell compiler that consumer and producer can change */
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
865 /* free skb in the packet ring at pos idx
866 * return idx of last bd freed
868 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
874 struct sk_buff *skb = tx_buf->skb;
875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891 #ifdef BNX2X_STOP_ON_ERROR
892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893 BNX2X_ERR("BAD nbd!\n");
897 new_cons = nbd + tx_buf->first_bd;
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
902 /* Skip a parse bd... */
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
926 tx_buf->first_bd = 0;
932 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
945 #ifdef BNX2X_STOP_ON_ERROR
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
951 return (s16)(fp->bp->tx_ring_size) - used;
954 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
958 /* Tell compiler that status block fields can change */
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
964 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
966 struct bnx2x *bp = fp->bp;
967 struct netdev_queue *txq;
968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
970 #ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
975 txq = netdev_get_tx_queue(bp->dev, fp->index);
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
979 while (sw_cons != hw_cons) {
982 pkt_cons = TX_BD(sw_cons);
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
987 hw_cons, sw_cons, pkt_cons);
989 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1009 /* TBD need a thresh? */
1010 if (unlikely(netif_tx_queue_stopped(txq))) {
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012 * while it's empty. This could have happen if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1021 __netif_tx_lock(txq, smp_processor_id());
1023 if ((netif_tx_queue_stopped(txq)) &&
1024 (bp->state == BNX2X_STATE_OPEN) &&
1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026 netif_tx_wake_queue(txq);
1028 __netif_tx_unlock(txq);
1034 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1037 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe)
1040 struct bnx2x *bp = fp->bp;
1041 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1045 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1046 fp->index, cid, command, bp->state,
1047 rr_cqe->ramrod_cqe.ramrod_type);
1052 switch (command | fp->state) {
1053 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054 BNX2X_FP_STATE_OPENING):
1055 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1057 fp->state = BNX2X_FP_STATE_OPEN;
1060 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1063 fp->state = BNX2X_FP_STATE_HALTED;
1067 BNX2X_ERR("unexpected MC reply (%d) "
1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
1076 switch (command | bp->state) {
1077 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079 bp->state = BNX2X_STATE_OPEN;
1082 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085 fp->state = BNX2X_FP_STATE_HALTED;
1088 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1094 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096 bnx2x_cnic_cfc_comp(bp, cid);
1100 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103 bp->set_mac_pending--;
1107 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109 bp->set_mac_pending--;
1114 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1115 command, bp->state);
1118 mb(); /* force bnx2x_wait_ramrod() to see the change */
1121 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1128 /* Skip "next page" elements */
1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1136 sw_buf->page = NULL;
1141 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1150 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1158 if (unlikely(page == NULL))
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1168 sw_buf->page = page;
1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1177 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1205 /* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1210 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
1226 *prod_bd = *cons_bd;
1229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1232 u16 last_max = fp->last_max_sge;
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1238 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1255 struct bnx2x *bp = fp->bp;
1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
1259 u16 last_max, last_elem, first_elem;
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1305 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1311 /* Clear the two last indices in the page to 1:
1312 these are the indices that correspond to the "next" element,
1313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1318 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
1348 #ifdef _ASM_GENERIC_INT_L64_H
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1353 fp->tpa_queue_used);
1357 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1362 struct sw_rx_page *rx_pg, old_rx_pg;
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1371 /* This is needed in order to enable forwarding support */
1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374 max(frag_size, (u32)len_on_bd));
1376 #ifdef BNX2X_STOP_ON_ERROR
1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394 rx_pg = &fp->rx_page_ring[sge_idx];
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1405 /* Unmap the page as we r going to pass it to the stack */
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1417 frag_size -= frag_len;
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1438 if (likely(new_skb)) {
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1450 prefetch(((char *)(skb)) + 128);
1452 #ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1462 skb_reserve(skb, pad);
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 iph = (struct iphdr *)skb->data;
1473 /* If there is no Rx VLAN offloading -
1474 take VLAN tag into an account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1492 napi_gro_receive(&fp->napi, skb);
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1504 /* else drop the packet and keep the buffer in the bin */
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
1507 fp->eth_q_stats.rx_skb_alloc_failed++;
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1518 struct ustorm_eth_rx_producers rx_prods = {0};
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539 ((u32 *)&rx_prods)[i]);
1541 mmiowb(); /* keep prod updates ordered */
1543 DP(NETIF_MSG_RX_STATUS,
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1548 /* Set Toeplitz hash value in the skb using the value from the
1549 * CQE (calculated by HW).
1551 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1552 struct sk_buff *skb)
1554 /* Set Toeplitz hash from CQE */
1555 if ((bp->dev->features & NETIF_F_RXHASH) &&
1556 (cqe->fast_path_cqe.status_flags &
1557 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1559 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1562 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1564 struct bnx2x *bp = fp->bp;
1565 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1566 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1569 #ifdef BNX2X_STOP_ON_ERROR
1570 if (unlikely(bp->panic))
1574 /* CQ "next element" is of the size of the regular element,
1575 that's why it's ok here */
1576 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1577 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1580 bd_cons = fp->rx_bd_cons;
1581 bd_prod = fp->rx_bd_prod;
1582 bd_prod_fw = bd_prod;
1583 sw_comp_cons = fp->rx_comp_cons;
1584 sw_comp_prod = fp->rx_comp_prod;
1586 /* Memory barrier necessary as speculative reads of the rx
1587 * buffer can be ahead of the index in the status block
1591 DP(NETIF_MSG_RX_STATUS,
1592 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1593 fp->index, hw_comp_cons, sw_comp_cons);
1595 while (sw_comp_cons != hw_comp_cons) {
1596 struct sw_rx_bd *rx_buf = NULL;
1597 struct sk_buff *skb;
1598 union eth_rx_cqe *cqe;
1602 comp_ring_cons = RCQ_BD(sw_comp_cons);
1603 bd_prod = RX_BD(bd_prod);
1604 bd_cons = RX_BD(bd_cons);
1606 /* Prefetch the page containing the BD descriptor
1607 at producer's index. It will be needed when new skb is
1609 prefetch((void *)(PAGE_ALIGN((unsigned long)
1610 (&fp->rx_desc_ring[bd_prod])) -
1613 cqe = &fp->rx_comp_ring[comp_ring_cons];
1614 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1616 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1617 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1618 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1619 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1620 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1621 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1623 /* is this a slowpath msg? */
1624 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1625 bnx2x_sp_event(fp, cqe);
1628 /* this is an rx packet */
1630 rx_buf = &fp->rx_buf_ring[bd_cons];
1633 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1634 pad = cqe->fast_path_cqe.placement_offset;
1636 /* If CQE is marked both TPA_START and TPA_END
1637 it is a non-TPA CQE */
1638 if ((!fp->disable_tpa) &&
1639 (TPA_TYPE(cqe_fp_flags) !=
1640 (TPA_TYPE_START | TPA_TYPE_END))) {
1641 u16 queue = cqe->fast_path_cqe.queue_index;
1643 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1644 DP(NETIF_MSG_RX_STATUS,
1645 "calling tpa_start on queue %d\n",
1648 bnx2x_tpa_start(fp, queue, skb,
1651 /* Set Toeplitz hash for an LRO skb */
1652 bnx2x_set_skb_rxhash(bp, cqe, skb);
1657 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1658 DP(NETIF_MSG_RX_STATUS,
1659 "calling tpa_stop on queue %d\n",
1662 if (!BNX2X_RX_SUM_FIX(cqe))
1663 BNX2X_ERR("STOP on none TCP "
1666 /* This is a size of the linear data
1668 len = le16_to_cpu(cqe->fast_path_cqe.
1670 bnx2x_tpa_stop(bp, fp, queue, pad,
1671 len, cqe, comp_ring_cons);
1672 #ifdef BNX2X_STOP_ON_ERROR
1677 bnx2x_update_sge_prod(fp,
1678 &cqe->fast_path_cqe);
1683 dma_sync_single_for_device(&bp->pdev->dev,
1684 dma_unmap_addr(rx_buf, mapping),
1685 pad + RX_COPY_THRESH,
1687 prefetch(((char *)(skb)) + 128);
1689 /* is this an error packet? */
1690 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1691 DP(NETIF_MSG_RX_ERR,
1692 "ERROR flags %x rx packet %u\n",
1693 cqe_fp_flags, sw_comp_cons);
1694 fp->eth_q_stats.rx_err_discard_pkt++;
1698 /* Since we don't have a jumbo ring
1699 * copy small packets if mtu > 1500
1701 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1702 (len <= RX_COPY_THRESH)) {
1703 struct sk_buff *new_skb;
1705 new_skb = netdev_alloc_skb(bp->dev,
1707 if (new_skb == NULL) {
1708 DP(NETIF_MSG_RX_ERR,
1709 "ERROR packet dropped "
1710 "because of alloc failure\n");
1711 fp->eth_q_stats.rx_skb_alloc_failed++;
1716 skb_copy_from_linear_data_offset(skb, pad,
1717 new_skb->data + pad, len);
1718 skb_reserve(new_skb, pad);
1719 skb_put(new_skb, len);
1721 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1726 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1727 dma_unmap_single(&bp->pdev->dev,
1728 dma_unmap_addr(rx_buf, mapping),
1731 skb_reserve(skb, pad);
1735 DP(NETIF_MSG_RX_ERR,
1736 "ERROR packet dropped because "
1737 "of alloc failure\n");
1738 fp->eth_q_stats.rx_skb_alloc_failed++;
1740 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1744 skb->protocol = eth_type_trans(skb, bp->dev);
1746 /* Set Toeplitz hash for a none-LRO skb */
1747 bnx2x_set_skb_rxhash(bp, cqe, skb);
1749 skb->ip_summed = CHECKSUM_NONE;
1751 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1752 skb->ip_summed = CHECKSUM_UNNECESSARY;
1754 fp->eth_q_stats.hw_csum_err++;
1758 skb_record_rx_queue(skb, fp->index);
1761 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1762 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1763 PARSING_FLAGS_VLAN))
1764 vlan_gro_receive(&fp->napi, bp->vlgrp,
1765 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1768 napi_gro_receive(&fp->napi, skb);
1774 bd_cons = NEXT_RX_IDX(bd_cons);
1775 bd_prod = NEXT_RX_IDX(bd_prod);
1776 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1779 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1780 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1782 if (rx_pkt == budget)
1786 fp->rx_bd_cons = bd_cons;
1787 fp->rx_bd_prod = bd_prod_fw;
1788 fp->rx_comp_cons = sw_comp_cons;
1789 fp->rx_comp_prod = sw_comp_prod;
1791 /* Update producers */
1792 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1795 fp->rx_pkt += rx_pkt;
1801 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1803 struct bnx2x_fastpath *fp = fp_cookie;
1804 struct bnx2x *bp = fp->bp;
1806 /* Return here if interrupt is disabled */
1807 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1808 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1812 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1813 fp->index, fp->sb_id);
1814 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1816 #ifdef BNX2X_STOP_ON_ERROR
1817 if (unlikely(bp->panic))
1821 /* Handle Rx and Tx according to MSI-X vector */
1822 prefetch(fp->rx_cons_sb);
1823 prefetch(fp->tx_cons_sb);
1824 prefetch(&fp->status_blk->u_status_block.status_block_index);
1825 prefetch(&fp->status_blk->c_status_block.status_block_index);
1826 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1831 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1833 struct bnx2x *bp = netdev_priv(dev_instance);
1834 u16 status = bnx2x_ack_int(bp);
1838 /* Return here if interrupt is shared and it's not for us */
1839 if (unlikely(status == 0)) {
1840 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1843 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1845 /* Return here if interrupt is disabled */
1846 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1847 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1851 #ifdef BNX2X_STOP_ON_ERROR
1852 if (unlikely(bp->panic))
1856 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1857 struct bnx2x_fastpath *fp = &bp->fp[i];
1859 mask = 0x2 << fp->sb_id;
1860 if (status & mask) {
1861 /* Handle Rx and Tx according to SB id */
1862 prefetch(fp->rx_cons_sb);
1863 prefetch(&fp->status_blk->u_status_block.
1864 status_block_index);
1865 prefetch(fp->tx_cons_sb);
1866 prefetch(&fp->status_blk->c_status_block.
1867 status_block_index);
1868 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1874 mask = 0x2 << CNIC_SB_ID(bp);
1875 if (status & (mask | 0x1)) {
1876 struct cnic_ops *c_ops = NULL;
1879 c_ops = rcu_dereference(bp->cnic_ops);
1881 c_ops->cnic_handler(bp->cnic_data, NULL);
1888 if (unlikely(status & 0x1)) {
1889 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1896 if (unlikely(status))
1897 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1903 /* end of fast path */
1905 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1910 * General service functions
1913 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1916 u32 resource_bit = (1 << resource);
1917 int func = BP_FUNC(bp);
1918 u32 hw_lock_control_reg;
1921 /* Validating that the resource is within range */
1922 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1924 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1930 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1932 hw_lock_control_reg =
1933 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1936 /* Validating that the resource is not already taken */
1937 lock_status = REG_RD(bp, hw_lock_control_reg);
1938 if (lock_status & resource_bit) {
1939 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1940 lock_status, resource_bit);
1944 /* Try for 5 second every 5ms */
1945 for (cnt = 0; cnt < 1000; cnt++) {
1946 /* Try to acquire the lock */
1947 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1948 lock_status = REG_RD(bp, hw_lock_control_reg);
1949 if (lock_status & resource_bit)
1954 DP(NETIF_MSG_HW, "Timeout\n");
1958 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1961 u32 resource_bit = (1 << resource);
1962 int func = BP_FUNC(bp);
1963 u32 hw_lock_control_reg;
1965 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1967 /* Validating that the resource is within range */
1968 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1970 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1971 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1976 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1978 hw_lock_control_reg =
1979 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1982 /* Validating that the resource is currently taken */
1983 lock_status = REG_RD(bp, hw_lock_control_reg);
1984 if (!(lock_status & resource_bit)) {
1985 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1986 lock_status, resource_bit);
1990 REG_WR(bp, hw_lock_control_reg, resource_bit);
1994 /* HW Lock for shared dual port PHYs */
1995 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1997 mutex_lock(&bp->port.phy_mutex);
1999 if (bp->port.need_hw_lock)
2000 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2003 static void bnx2x_release_phy_lock(struct bnx2x *bp)
2005 if (bp->port.need_hw_lock)
2006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2008 mutex_unlock(&bp->port.phy_mutex);
2011 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2013 /* The GPIO should be swapped if swap register is set and active */
2014 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2015 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2016 int gpio_shift = gpio_num +
2017 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2018 u32 gpio_mask = (1 << gpio_shift);
2022 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2027 /* read GPIO value */
2028 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2030 /* get the requested pin value */
2031 if ((gpio_reg & gpio_mask) == gpio_mask)
2036 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2041 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057 /* read GPIO and mask except the float bits */
2058 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2061 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2062 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2063 gpio_num, gpio_shift);
2064 /* clear FLOAT and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2069 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2070 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2071 gpio_num, gpio_shift);
2072 /* clear FLOAT and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2077 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2078 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2079 gpio_num, gpio_shift);
2081 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2088 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2089 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2094 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2096 /* The GPIO should be swapped if swap register is set and active */
2097 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2098 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2099 int gpio_shift = gpio_num +
2100 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2101 u32 gpio_mask = (1 << gpio_shift);
2104 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2105 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2109 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2111 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2114 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2115 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2116 "output low\n", gpio_num, gpio_shift);
2117 /* clear SET and set CLR */
2118 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2119 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2122 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2123 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2124 "output high\n", gpio_num, gpio_shift);
2125 /* clear CLR and set SET */
2126 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2127 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2134 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2135 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2140 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2142 u32 spio_mask = (1 << spio_num);
2145 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2146 (spio_num > MISC_REGISTERS_SPIO_7)) {
2147 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2151 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2152 /* read SPIO and mask except the float bits */
2153 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2156 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2157 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2158 /* clear FLOAT and set CLR */
2159 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2163 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2164 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2165 /* clear FLOAT and set SET */
2166 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2167 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2170 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2171 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2173 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2180 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2181 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2186 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2188 switch (bp->link_vars.ieee_fc &
2189 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2190 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2191 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2195 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2196 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2200 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2201 bp->port.advertising |= ADVERTISED_Asym_Pause;
2205 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2211 static void bnx2x_link_report(struct bnx2x *bp)
2213 if (bp->flags & MF_FUNC_DIS) {
2214 netif_carrier_off(bp->dev);
2215 netdev_err(bp->dev, "NIC Link is Down\n");
2219 if (bp->link_vars.link_up) {
2222 if (bp->state == BNX2X_STATE_OPEN)
2223 netif_carrier_on(bp->dev);
2224 netdev_info(bp->dev, "NIC Link is Up, ");
2226 line_speed = bp->link_vars.line_speed;
2231 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2232 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2233 if (vn_max_rate < line_speed)
2234 line_speed = vn_max_rate;
2236 pr_cont("%d Mbps ", line_speed);
2238 if (bp->link_vars.duplex == DUPLEX_FULL)
2239 pr_cont("full duplex");
2241 pr_cont("half duplex");
2243 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2244 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2245 pr_cont(", receive ");
2246 if (bp->link_vars.flow_ctrl &
2248 pr_cont("& transmit ");
2250 pr_cont(", transmit ");
2252 pr_cont("flow control ON");
2256 } else { /* link_down */
2257 netif_carrier_off(bp->dev);
2258 netdev_err(bp->dev, "NIC Link is Down\n");
2262 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2264 if (!BP_NOMCP(bp)) {
2267 /* Initialize link parameters structure variables */
2268 /* It is recommended to turn off RX FC for jumbo frames
2269 for better performance */
2270 if (bp->dev->mtu > 5000)
2271 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2273 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2275 bnx2x_acquire_phy_lock(bp);
2277 if (load_mode == LOAD_DIAG)
2278 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2280 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2282 bnx2x_release_phy_lock(bp);
2284 bnx2x_calc_fc_adv(bp);
2286 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2287 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2288 bnx2x_link_report(bp);
2293 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2297 static void bnx2x_link_set(struct bnx2x *bp)
2299 if (!BP_NOMCP(bp)) {
2300 bnx2x_acquire_phy_lock(bp);
2301 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2302 bnx2x_release_phy_lock(bp);
2304 bnx2x_calc_fc_adv(bp);
2306 BNX2X_ERR("Bootcode is missing - can not set link\n");
2309 static void bnx2x__link_reset(struct bnx2x *bp)
2311 if (!BP_NOMCP(bp)) {
2312 bnx2x_acquire_phy_lock(bp);
2313 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2314 bnx2x_release_phy_lock(bp);
2316 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2319 static u8 bnx2x_link_test(struct bnx2x *bp)
2323 if (!BP_NOMCP(bp)) {
2324 bnx2x_acquire_phy_lock(bp);
2325 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2326 bnx2x_release_phy_lock(bp);
2328 BNX2X_ERR("Bootcode is missing - can not test link\n");
2333 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2335 u32 r_param = bp->link_vars.line_speed / 8;
2336 u32 fair_periodic_timeout_usec;
2339 memset(&(bp->cmng.rs_vars), 0,
2340 sizeof(struct rate_shaping_vars_per_port));
2341 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2343 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2344 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2346 /* this is the threshold below which no timer arming will occur
2347 1.25 coefficient is for the threshold to be a little bigger
2348 than the real time, to compensate for timer in-accuracy */
2349 bp->cmng.rs_vars.rs_threshold =
2350 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2352 /* resolution of fairness timer */
2353 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2354 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2355 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2357 /* this is the threshold below which we won't arm the timer anymore */
2358 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2360 /* we multiply by 1e3/8 to get bytes/msec.
2361 We don't want the credits to pass a credit
2362 of the t_fair*FAIR_MEM (algorithm resolution) */
2363 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2364 /* since each tick is 4 usec */
2365 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2368 /* Calculates the sum of vn_min_rates.
2369 It's needed for further normalizing of the min_rates.
2371 sum of vn_min_rates.
2373 0 - if all the min_rates are 0.
2374 In the later case fainess algorithm should be deactivated.
2375 If not all min_rates are zero then those that are zeroes will be set to 1.
2377 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2380 int port = BP_PORT(bp);
2383 bp->vn_weight_sum = 0;
2384 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2385 int func = 2*vn + port;
2386 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2387 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2388 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2390 /* Skip hidden vns */
2391 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2394 /* If min rate is zero - set it to 1 */
2396 vn_min_rate = DEF_MIN_RATE;
2400 bp->vn_weight_sum += vn_min_rate;
2403 /* ... only if all min rates are zeros - disable fairness */
2405 bp->cmng.flags.cmng_enables &=
2406 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2407 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2408 " fairness will be disabled\n");
2410 bp->cmng.flags.cmng_enables |=
2411 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2414 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2416 struct rate_shaping_vars_per_vn m_rs_vn;
2417 struct fairness_vars_per_vn m_fair_vn;
2418 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2419 u16 vn_min_rate, vn_max_rate;
2422 /* If function is hidden - set min and max to zeroes */
2423 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2428 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2429 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2430 /* If min rate is zero - set it to 1 */
2432 vn_min_rate = DEF_MIN_RATE;
2433 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2434 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2437 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2438 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2440 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2441 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2443 /* global vn counter - maximal Mbps for this vn */
2444 m_rs_vn.vn_counter.rate = vn_max_rate;
2446 /* quota - number of bytes transmitted in this period */
2447 m_rs_vn.vn_counter.quota =
2448 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2450 if (bp->vn_weight_sum) {
2451 /* credit for each period of the fairness algorithm:
2452 number of bytes in T_FAIR (the vn share the port rate).
2453 vn_weight_sum should not be larger than 10000, thus
2454 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2456 m_fair_vn.vn_credit_delta =
2457 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2458 (8 * bp->vn_weight_sum))),
2459 (bp->cmng.fair_vars.fair_threshold * 2));
2460 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2461 m_fair_vn.vn_credit_delta);
2464 /* Store it to internal memory */
2465 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2466 REG_WR(bp, BAR_XSTRORM_INTMEM +
2467 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2468 ((u32 *)(&m_rs_vn))[i]);
2470 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2471 REG_WR(bp, BAR_XSTRORM_INTMEM +
2472 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2473 ((u32 *)(&m_fair_vn))[i]);
2477 /* This function is called upon link interrupt */
2478 static void bnx2x_link_attn(struct bnx2x *bp)
2480 u32 prev_link_status = bp->link_vars.link_status;
2481 /* Make sure that we are synced with the current statistics */
2482 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2484 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2486 if (bp->link_vars.link_up) {
2488 /* dropless flow control */
2489 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2490 int port = BP_PORT(bp);
2491 u32 pause_enabled = 0;
2493 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2496 REG_WR(bp, BAR_USTRORM_INTMEM +
2497 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2501 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2502 struct host_port_stats *pstats;
2504 pstats = bnx2x_sp(bp, port_stats);
2505 /* reset old bmac stats */
2506 memset(&(pstats->mac_stx[0]), 0,
2507 sizeof(struct mac_stx));
2509 if (bp->state == BNX2X_STATE_OPEN)
2510 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2513 /* indicate link status only if link status actually changed */
2514 if (prev_link_status != bp->link_vars.link_status)
2515 bnx2x_link_report(bp);
2518 int port = BP_PORT(bp);
2522 /* Set the attention towards other drivers on the same port */
2523 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2524 if (vn == BP_E1HVN(bp))
2527 func = ((vn << 1) | port);
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2529 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2532 if (bp->link_vars.link_up) {
2535 /* Init rate shaping and fairness contexts */
2536 bnx2x_init_port_minmax(bp);
2538 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2539 bnx2x_init_vn_minmax(bp, 2*vn + port);
2541 /* Store it to internal memory */
2543 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2544 REG_WR(bp, BAR_XSTRORM_INTMEM +
2545 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2546 ((u32 *)(&bp->cmng))[i]);
2551 static void bnx2x__link_status_update(struct bnx2x *bp)
2553 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2556 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2558 if (bp->link_vars.link_up)
2559 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2561 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2563 bnx2x_calc_vn_weight_sum(bp);
2565 /* indicate link status */
2566 bnx2x_link_report(bp);
2569 static void bnx2x_pmf_update(struct bnx2x *bp)
2571 int port = BP_PORT(bp);
2575 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2577 /* enable nig attention */
2578 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2579 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2580 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2582 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2590 * General service functions
2593 /* send the MCP a request, block until there is a reply */
2594 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2596 int func = BP_FUNC(bp);
2597 u32 seq = ++bp->fw_seq;
2600 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2602 mutex_lock(&bp->fw_mb_mutex);
2603 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2604 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2607 /* let the FW do it's magic ... */
2610 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2612 /* Give the FW up to 5 second (500*10ms) */
2613 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2615 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2616 cnt*delay, rc, seq);
2618 /* is this a reply to our command? */
2619 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2620 rc &= FW_MSG_CODE_MASK;
2623 BNX2X_ERR("FW failed to respond!\n");
2627 mutex_unlock(&bp->fw_mb_mutex);
2632 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2633 static void bnx2x_set_rx_mode(struct net_device *dev);
2635 static void bnx2x_e1h_disable(struct bnx2x *bp)
2637 int port = BP_PORT(bp);
2639 netif_tx_disable(bp->dev);
2641 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2643 netif_carrier_off(bp->dev);
2646 static void bnx2x_e1h_enable(struct bnx2x *bp)
2648 int port = BP_PORT(bp);
2650 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2652 /* Tx queue should be only reenabled */
2653 netif_tx_wake_all_queues(bp->dev);
2656 * Should not call netif_carrier_on since it will be called if the link
2657 * is up when checking for link state
2661 static void bnx2x_update_min_max(struct bnx2x *bp)
2663 int port = BP_PORT(bp);
2666 /* Init rate shaping and fairness contexts */
2667 bnx2x_init_port_minmax(bp);
2669 bnx2x_calc_vn_weight_sum(bp);
2671 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2672 bnx2x_init_vn_minmax(bp, 2*vn + port);
2677 /* Set the attention towards other drivers on the same port */
2678 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2679 if (vn == BP_E1HVN(bp))
2682 func = ((vn << 1) | port);
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2684 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2687 /* Store it to internal memory */
2688 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2689 REG_WR(bp, BAR_XSTRORM_INTMEM +
2690 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2691 ((u32 *)(&bp->cmng))[i]);
2695 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2697 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2699 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2702 * This is the only place besides the function initialization
2703 * where the bp->flags can change so it is done without any
2706 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2707 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2708 bp->flags |= MF_FUNC_DIS;
2710 bnx2x_e1h_disable(bp);
2712 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2713 bp->flags &= ~MF_FUNC_DIS;
2715 bnx2x_e1h_enable(bp);
2717 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2719 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2721 bnx2x_update_min_max(bp);
2722 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2725 /* Report results to MCP */
2727 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2732 /* must be called under the spq lock */
2733 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2735 struct eth_spe *next_spe = bp->spq_prod_bd;
2737 if (bp->spq_prod_bd == bp->spq_last_bd) {
2738 bp->spq_prod_bd = bp->spq;
2739 bp->spq_prod_idx = 0;
2740 DP(NETIF_MSG_TIMER, "end of spq\n");
2748 /* must be called under the spq lock */
2749 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2751 int func = BP_FUNC(bp);
2753 /* Make sure that BD data is updated before writing the producer */
2756 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2761 /* the slow path queue is odd since completions arrive on the fastpath ring */
2762 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2763 u32 data_hi, u32 data_lo, int common)
2765 struct eth_spe *spe;
2767 #ifdef BNX2X_STOP_ON_ERROR
2768 if (unlikely(bp->panic))
2772 spin_lock_bh(&bp->spq_lock);
2774 if (!bp->spq_left) {
2775 BNX2X_ERR("BUG! SPQ ring full!\n");
2776 spin_unlock_bh(&bp->spq_lock);
2781 spe = bnx2x_sp_get_next(bp);
2783 /* CID needs port number to be encoded int it */
2784 spe->hdr.conn_and_cmd_data =
2785 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2787 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2790 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2792 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2793 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2797 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2798 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2799 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2800 (u32)(U64_LO(bp->spq_mapping) +
2801 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2802 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2804 bnx2x_sp_prod_update(bp);
2805 spin_unlock_bh(&bp->spq_lock);
2809 /* acquire split MCP access lock register */
2810 static int bnx2x_acquire_alr(struct bnx2x *bp)
2816 for (j = 0; j < 1000; j++) {
2818 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2819 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2820 if (val & (1L << 31))
2825 if (!(val & (1L << 31))) {
2826 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2833 /* release split MCP access lock register */
2834 static void bnx2x_release_alr(struct bnx2x *bp)
2836 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2839 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2841 struct host_def_status_block *def_sb = bp->def_status_blk;
2844 barrier(); /* status block is written to by the chip */
2845 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2846 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2849 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2850 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2853 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2854 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2857 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2858 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2861 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2862 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2869 * slow path service functions
2872 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2874 int port = BP_PORT(bp);
2875 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2876 COMMAND_REG_ATTN_BITS_SET);
2877 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2878 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2879 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2880 NIG_REG_MASK_INTERRUPT_PORT0;
2884 if (bp->attn_state & asserted)
2885 BNX2X_ERR("IGU ERROR\n");
2887 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2888 aeu_mask = REG_RD(bp, aeu_addr);
2890 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2891 aeu_mask, asserted);
2892 aeu_mask &= ~(asserted & 0x3ff);
2893 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2895 REG_WR(bp, aeu_addr, aeu_mask);
2896 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2898 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2899 bp->attn_state |= asserted;
2900 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2902 if (asserted & ATTN_HARD_WIRED_MASK) {
2903 if (asserted & ATTN_NIG_FOR_FUNC) {
2905 bnx2x_acquire_phy_lock(bp);
2907 /* save nig interrupt mask */
2908 nig_mask = REG_RD(bp, nig_int_mask_addr);
2909 REG_WR(bp, nig_int_mask_addr, 0);
2911 bnx2x_link_attn(bp);
2913 /* handle unicore attn? */
2915 if (asserted & ATTN_SW_TIMER_4_FUNC)
2916 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2918 if (asserted & GPIO_2_FUNC)
2919 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2921 if (asserted & GPIO_3_FUNC)
2922 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2924 if (asserted & GPIO_4_FUNC)
2925 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2928 if (asserted & ATTN_GENERAL_ATTN_1) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2932 if (asserted & ATTN_GENERAL_ATTN_2) {
2933 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2934 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2936 if (asserted & ATTN_GENERAL_ATTN_3) {
2937 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2938 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2941 if (asserted & ATTN_GENERAL_ATTN_4) {
2942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2945 if (asserted & ATTN_GENERAL_ATTN_5) {
2946 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2947 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2949 if (asserted & ATTN_GENERAL_ATTN_6) {
2950 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2951 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2955 } /* if hardwired */
2957 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2959 REG_WR(bp, hc_addr, asserted);
2961 /* now set back the mask */
2962 if (asserted & ATTN_NIG_FOR_FUNC) {
2963 REG_WR(bp, nig_int_mask_addr, nig_mask);
2964 bnx2x_release_phy_lock(bp);
2968 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2970 int port = BP_PORT(bp);
2972 /* mark the failure */
2973 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2974 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2975 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2976 bp->link_params.ext_phy_config);
2978 /* log the failure */
2979 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2980 " the driver to shutdown the card to prevent permanent"
2981 " damage. Please contact OEM Support for assistance\n");
2984 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2986 int port = BP_PORT(bp);
2988 u32 val, swap_val, swap_override;
2990 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2991 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2993 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2995 val = REG_RD(bp, reg_offset);
2996 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2997 REG_WR(bp, reg_offset, val);
2999 BNX2X_ERR("SPIO5 hw attention\n");
3001 /* Fan failure attention */
3002 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
3003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3004 /* Low power mode is controlled by GPIO 2 */
3005 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3006 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007 /* The PHY reset is controlled by GPIO 1 */
3008 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3009 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3013 /* The PHY reset is controlled by GPIO 1 */
3014 /* fake the port number to cancel the swap done in
3016 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3017 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3018 port = (swap_val && swap_override) ^ 1;
3019 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3020 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3026 bnx2x_fan_failure(bp);
3029 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3030 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3031 bnx2x_acquire_phy_lock(bp);
3032 bnx2x_handle_module_detect_int(&bp->link_params);
3033 bnx2x_release_phy_lock(bp);
3036 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3038 val = REG_RD(bp, reg_offset);
3039 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3040 REG_WR(bp, reg_offset, val);
3042 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3043 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3048 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3052 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3054 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3055 BNX2X_ERR("DB hw attention 0x%x\n", val);
3056 /* DORQ discard attention */
3058 BNX2X_ERR("FATAL error from DORQ\n");
3061 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3063 int port = BP_PORT(bp);
3066 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3067 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3069 val = REG_RD(bp, reg_offset);
3070 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3071 REG_WR(bp, reg_offset, val);
3073 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3074 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3079 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3083 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3085 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3086 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3087 /* CFC error attention */
3089 BNX2X_ERR("FATAL error from CFC\n");
3092 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3094 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3095 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3096 /* RQ_USDMDP_FIFO_OVERFLOW */
3098 BNX2X_ERR("FATAL error from PXP\n");
3101 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3103 int port = BP_PORT(bp);
3106 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3107 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3109 val = REG_RD(bp, reg_offset);
3110 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3111 REG_WR(bp, reg_offset, val);
3113 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3114 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3119 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3123 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3125 if (attn & BNX2X_PMF_LINK_ASSERT) {
3126 int func = BP_FUNC(bp);
3128 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3129 bp->mf_config = SHMEM_RD(bp,
3130 mf_cfg.func_mf_config[func].config);
3131 val = SHMEM_RD(bp, func_mb[func].drv_status);
3132 if (val & DRV_STATUS_DCC_EVENT_MASK)
3134 (val & DRV_STATUS_DCC_EVENT_MASK));
3135 bnx2x__link_status_update(bp);
3136 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3137 bnx2x_pmf_update(bp);
3139 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3141 BNX2X_ERR("MC assert!\n");
3142 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3144 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3145 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3148 } else if (attn & BNX2X_MCP_ASSERT) {
3150 BNX2X_ERR("MCP assert!\n");
3151 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3155 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3158 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3159 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3160 if (attn & BNX2X_GRC_TIMEOUT) {
3161 val = CHIP_IS_E1H(bp) ?
3162 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3163 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3165 if (attn & BNX2X_GRC_RSV) {
3166 val = CHIP_IS_E1H(bp) ?
3167 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3168 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3170 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3174 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3175 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3178 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3179 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3180 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3181 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3182 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3183 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3185 * should be run under rtnl lock
3187 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3197 * should be run under rtnl lock
3199 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3209 * should be run under rtnl lock
3211 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3213 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3215 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3219 * should be run under rtnl lock
3221 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3223 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3234 * should be run under rtnl lock
3236 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3238 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3242 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3243 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3251 * should be run under rtnl lock
3253 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3255 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3258 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3260 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3261 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3264 static inline void _print_next_block(int idx, const char *blk)
3271 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3280 _print_next_block(par_num++, "BRB");
3282 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3283 _print_next_block(par_num++, "PARSER");
3285 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "TSDM");
3288 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3289 _print_next_block(par_num++, "SEARCHER");
3291 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3292 _print_next_block(par_num++, "TSEMI");
3304 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3308 for (i = 0; sig; i++) {
3309 cur_bit = ((u32)0x1 << i);
3310 if (sig & cur_bit) {
3312 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3313 _print_next_block(par_num++, "PBCLIENT");
3315 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3316 _print_next_block(par_num++, "QM");
3318 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3319 _print_next_block(par_num++, "XSDM");
3321 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3322 _print_next_block(par_num++, "XSEMI");
3324 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3325 _print_next_block(par_num++, "DOORBELLQ");
3327 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3328 _print_next_block(par_num++, "VAUX PCI CORE");
3330 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3331 _print_next_block(par_num++, "DEBUG");
3333 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3334 _print_next_block(par_num++, "USDM");
3336 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3337 _print_next_block(par_num++, "USEMI");
3339 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3340 _print_next_block(par_num++, "UPB");
3342 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSDM");
3355 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3359 for (i = 0; sig; i++) {
3360 cur_bit = ((u32)0x1 << i);
3361 if (sig & cur_bit) {
3363 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3364 _print_next_block(par_num++, "CSEMI");
3366 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3367 _print_next_block(par_num++, "PXP");
3369 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3370 _print_next_block(par_num++,
3371 "PXPPCICLOCKCLIENT");
3373 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3374 _print_next_block(par_num++, "CFC");
3376 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3377 _print_next_block(par_num++, "CDU");
3379 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3380 _print_next_block(par_num++, "IGU");
3382 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3383 _print_next_block(par_num++, "MISC");
3395 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3399 for (i = 0; sig; i++) {
3400 cur_bit = ((u32)0x1 << i);
3401 if (sig & cur_bit) {
3403 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3404 _print_next_block(par_num++, "MCP ROM");
3406 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3407 _print_next_block(par_num++, "MCP UMP RX");
3409 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3410 _print_next_block(par_num++, "MCP UMP TX");
3412 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3413 _print_next_block(par_num++, "MCP SCPAD");
3425 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3428 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3429 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3431 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3432 "[0]:0x%08x [1]:0x%08x "
3433 "[2]:0x%08x [3]:0x%08x\n",
3434 sig0 & HW_PRTY_ASSERT_SET_0,
3435 sig1 & HW_PRTY_ASSERT_SET_1,
3436 sig2 & HW_PRTY_ASSERT_SET_2,
3437 sig3 & HW_PRTY_ASSERT_SET_3);
3438 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3440 par_num = bnx2x_print_blocks_with_parity0(
3441 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3442 par_num = bnx2x_print_blocks_with_parity1(
3443 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3444 par_num = bnx2x_print_blocks_with_parity2(
3445 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3446 par_num = bnx2x_print_blocks_with_parity3(
3447 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3454 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3456 struct attn_route attn;
3457 int port = BP_PORT(bp);
3459 attn.sig[0] = REG_RD(bp,
3460 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3462 attn.sig[1] = REG_RD(bp,
3463 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3465 attn.sig[2] = REG_RD(bp,
3466 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3468 attn.sig[3] = REG_RD(bp,
3469 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3472 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3476 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3478 struct attn_route attn, *group_mask;
3479 int port = BP_PORT(bp);
3485 /* need to take HW lock because MCP or other port might also
3486 try to handle this event */
3487 bnx2x_acquire_alr(bp);
3489 if (bnx2x_chk_parity_attn(bp)) {
3490 bp->recovery_state = BNX2X_RECOVERY_INIT;
3491 bnx2x_set_reset_in_progress(bp);
3492 schedule_delayed_work(&bp->reset_task, 0);
3493 /* Disable HW interrupts */
3494 bnx2x_int_disable(bp);
3495 bnx2x_release_alr(bp);
3496 /* In case of parity errors don't handle attentions so that
3497 * other function would "see" parity errors.
3502 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3503 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3504 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3505 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3506 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3507 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3509 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3510 if (deasserted & (1 << index)) {
3511 group_mask = &bp->attn_group[index];
3513 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3514 index, group_mask->sig[0], group_mask->sig[1],
3515 group_mask->sig[2], group_mask->sig[3]);
3517 bnx2x_attn_int_deasserted3(bp,
3518 attn.sig[3] & group_mask->sig[3]);
3519 bnx2x_attn_int_deasserted1(bp,
3520 attn.sig[1] & group_mask->sig[1]);
3521 bnx2x_attn_int_deasserted2(bp,
3522 attn.sig[2] & group_mask->sig[2]);
3523 bnx2x_attn_int_deasserted0(bp,
3524 attn.sig[0] & group_mask->sig[0]);
3528 bnx2x_release_alr(bp);
3530 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3533 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3535 REG_WR(bp, reg_addr, val);
3537 if (~bp->attn_state & deasserted)
3538 BNX2X_ERR("IGU ERROR\n");
3540 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3541 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3543 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3544 aeu_mask = REG_RD(bp, reg_addr);
3546 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3547 aeu_mask, deasserted);
3548 aeu_mask |= (deasserted & 0x3ff);
3549 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3551 REG_WR(bp, reg_addr, aeu_mask);
3552 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3554 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3555 bp->attn_state &= ~deasserted;
3556 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3559 static void bnx2x_attn_int(struct bnx2x *bp)
3561 /* read local copy of bits */
3562 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3564 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3566 u32 attn_state = bp->attn_state;
3568 /* look for changed bits */
3569 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3570 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3573 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3574 attn_bits, attn_ack, asserted, deasserted);
3576 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3577 BNX2X_ERR("BAD attention state\n");
3579 /* handle bits that were raised */
3581 bnx2x_attn_int_asserted(bp, asserted);
3584 bnx2x_attn_int_deasserted(bp, deasserted);
3587 static void bnx2x_sp_task(struct work_struct *work)
3589 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3592 /* Return here if interrupt is disabled */
3593 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3594 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3598 status = bnx2x_update_dsb_idx(bp);
3599 /* if (status == 0) */
3600 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3602 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3610 /* CStorm events: STAT_QUERY */
3612 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3616 if (unlikely(status))
3617 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3620 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3622 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3624 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3626 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3628 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3632 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3634 struct net_device *dev = dev_instance;
3635 struct bnx2x *bp = netdev_priv(dev);
3637 /* Return here if interrupt is disabled */
3638 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3639 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3643 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3645 #ifdef BNX2X_STOP_ON_ERROR
3646 if (unlikely(bp->panic))
3652 struct cnic_ops *c_ops;
3655 c_ops = rcu_dereference(bp->cnic_ops);
3657 c_ops->cnic_handler(bp->cnic_data, NULL);
3661 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3666 /* end of slow path */
3670 /****************************************************************************
3672 ****************************************************************************/
3674 /* sum[hi:lo] += add[hi:lo] */
3675 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3678 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3681 /* difference = minuend - subtrahend */
3682 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3684 if (m_lo < s_lo) { \
3686 d_hi = m_hi - s_hi; \
3688 /* we can 'loan' 1 */ \
3690 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3692 /* m_hi <= s_hi */ \
3697 /* m_lo >= s_lo */ \
3698 if (m_hi < s_hi) { \
3702 /* m_hi >= s_hi */ \
3703 d_hi = m_hi - s_hi; \
3704 d_lo = m_lo - s_lo; \
3709 #define UPDATE_STAT64(s, t) \
3711 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3712 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3713 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3714 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3715 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3716 pstats->mac_stx[1].t##_lo, diff.lo); \
3719 #define UPDATE_STAT64_NIG(s, t) \
3721 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3722 diff.lo, new->s##_lo, old->s##_lo); \
3723 ADD_64(estats->t##_hi, diff.hi, \
3724 estats->t##_lo, diff.lo); \
3727 /* sum[hi:lo] += add */
3728 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3731 s_hi += (s_lo < a) ? 1 : 0; \
3734 #define UPDATE_EXTEND_STAT(s) \
3736 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3737 pstats->mac_stx[1].s##_lo, \
3741 #define UPDATE_EXTEND_TSTAT(s, t) \
3743 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3744 old_tclient->s = tclient->s; \
3745 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3748 #define UPDATE_EXTEND_USTAT(s, t) \
3750 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3751 old_uclient->s = uclient->s; \
3752 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3755 #define UPDATE_EXTEND_XSTAT(s, t) \
3757 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3758 old_xclient->s = xclient->s; \
3759 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3762 /* minuend -= subtrahend */
3763 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3765 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3768 /* minuend[hi:lo] -= subtrahend */
3769 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3771 SUB_64(m_hi, 0, m_lo, s); \
3774 #define SUB_EXTEND_USTAT(s, t) \
3776 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3777 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3781 * General service functions
3784 static inline long bnx2x_hilo(u32 *hiref)
3786 u32 lo = *(hiref + 1);
3787 #if (BITS_PER_LONG == 64)
3790 return HILO_U64(hi, lo);
3797 * Init service functions
3800 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3802 if (!bp->stats_pending) {
3803 struct eth_query_ramrod_data ramrod_data = {0};
3806 ramrod_data.drv_counter = bp->stats_counter++;
3807 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3808 for_each_queue(bp, i)
3809 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3811 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3812 ((u32 *)&ramrod_data)[1],
3813 ((u32 *)&ramrod_data)[0], 0);
3815 /* stats ramrod has it's own slot on the spq */
3817 bp->stats_pending = 1;
3822 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3824 struct dmae_command *dmae = &bp->stats_dmae;
3825 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3827 *stats_comp = DMAE_COMP_VAL;
3828 if (CHIP_REV_IS_SLOW(bp))
3832 if (bp->executer_idx) {
3833 int loader_idx = PMF_DMAE_C(bp);
3835 memset(dmae, 0, sizeof(struct dmae_command));
3837 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3838 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3839 DMAE_CMD_DST_RESET |
3841 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3843 DMAE_CMD_ENDIANITY_DW_SWAP |
3845 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3847 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3848 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3849 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3850 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3851 sizeof(struct dmae_command) *
3852 (loader_idx + 1)) >> 2;
3853 dmae->dst_addr_hi = 0;
3854 dmae->len = sizeof(struct dmae_command) >> 2;
3857 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3858 dmae->comp_addr_hi = 0;
3862 bnx2x_post_dmae(bp, dmae, loader_idx);
3864 } else if (bp->func_stx) {
3866 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3870 static int bnx2x_stats_comp(struct bnx2x *bp)
3872 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3876 while (*stats_comp != DMAE_COMP_VAL) {
3878 BNX2X_ERR("timeout waiting for stats finished\n");
3888 * Statistics service functions
3891 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3893 struct dmae_command *dmae;
3895 int loader_idx = PMF_DMAE_C(bp);
3896 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3899 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3900 BNX2X_ERR("BUG!\n");
3904 bp->executer_idx = 0;
3906 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3908 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3910 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3912 DMAE_CMD_ENDIANITY_DW_SWAP |
3914 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3915 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3917 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3918 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3919 dmae->src_addr_lo = bp->port.port_stx >> 2;
3920 dmae->src_addr_hi = 0;
3921 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3922 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3923 dmae->len = DMAE_LEN32_RD_MAX;
3924 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3925 dmae->comp_addr_hi = 0;
3928 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3929 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3930 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3931 dmae->src_addr_hi = 0;
3932 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3933 DMAE_LEN32_RD_MAX * 4);
3934 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3935 DMAE_LEN32_RD_MAX * 4);
3936 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3937 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3938 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3939 dmae->comp_val = DMAE_COMP_VAL;
3942 bnx2x_hw_stats_post(bp);
3943 bnx2x_stats_comp(bp);
3946 static void bnx2x_port_stats_init(struct bnx2x *bp)
3948 struct dmae_command *dmae;
3949 int port = BP_PORT(bp);
3950 int vn = BP_E1HVN(bp);
3952 int loader_idx = PMF_DMAE_C(bp);
3954 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3957 if (!bp->link_vars.link_up || !bp->port.pmf) {
3958 BNX2X_ERR("BUG!\n");
3962 bp->executer_idx = 0;
3965 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3966 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3967 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3969 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3971 DMAE_CMD_ENDIANITY_DW_SWAP |
3973 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3974 (vn << DMAE_CMD_E1HVN_SHIFT));
3976 if (bp->port.port_stx) {
3978 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979 dmae->opcode = opcode;
3980 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3981 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3982 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3983 dmae->dst_addr_hi = 0;
3984 dmae->len = sizeof(struct host_port_stats) >> 2;
3985 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986 dmae->comp_addr_hi = 0;
3992 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3993 dmae->opcode = opcode;
3994 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3995 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3996 dmae->dst_addr_lo = bp->func_stx >> 2;
3997 dmae->dst_addr_hi = 0;
3998 dmae->len = sizeof(struct host_func_stats) >> 2;
3999 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4000 dmae->comp_addr_hi = 0;
4005 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4006 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4007 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4009 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4011 DMAE_CMD_ENDIANITY_DW_SWAP |
4013 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4014 (vn << DMAE_CMD_E1HVN_SHIFT));
4016 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4018 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4019 NIG_REG_INGRESS_BMAC0_MEM);
4021 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4022 BIGMAC_REGISTER_TX_STAT_GTBYT */
4023 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4024 dmae->opcode = opcode;
4025 dmae->src_addr_lo = (mac_addr +
4026 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4027 dmae->src_addr_hi = 0;
4028 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4029 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4030 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4031 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4032 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4033 dmae->comp_addr_hi = 0;
4036 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4037 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4038 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4039 dmae->opcode = opcode;
4040 dmae->src_addr_lo = (mac_addr +
4041 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4042 dmae->src_addr_hi = 0;
4043 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4044 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4045 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4046 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4047 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4048 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4049 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4050 dmae->comp_addr_hi = 0;
4053 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4055 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4057 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4058 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4059 dmae->opcode = opcode;
4060 dmae->src_addr_lo = (mac_addr +
4061 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4062 dmae->src_addr_hi = 0;
4063 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4064 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4065 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4066 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4067 dmae->comp_addr_hi = 0;
4070 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4071 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4072 dmae->opcode = opcode;
4073 dmae->src_addr_lo = (mac_addr +
4074 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4075 dmae->src_addr_hi = 0;
4076 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4077 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4078 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4079 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4081 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4082 dmae->comp_addr_hi = 0;
4085 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4086 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4087 dmae->opcode = opcode;
4088 dmae->src_addr_lo = (mac_addr +
4089 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4090 dmae->src_addr_hi = 0;
4091 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4092 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4093 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4094 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4095 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4096 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097 dmae->comp_addr_hi = 0;
4102 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4103 dmae->opcode = opcode;
4104 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4105 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4106 dmae->src_addr_hi = 0;
4107 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4108 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4109 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4110 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111 dmae->comp_addr_hi = 0;
4114 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115 dmae->opcode = opcode;
4116 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4117 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4118 dmae->src_addr_hi = 0;
4119 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4120 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4121 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4122 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4123 dmae->len = (2*sizeof(u32)) >> 2;
4124 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4125 dmae->comp_addr_hi = 0;
4128 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4129 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4130 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4131 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4133 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4135 DMAE_CMD_ENDIANITY_DW_SWAP |
4137 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4138 (vn << DMAE_CMD_E1HVN_SHIFT));
4139 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4140 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4141 dmae->src_addr_hi = 0;
4142 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4143 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4144 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4145 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4146 dmae->len = (2*sizeof(u32)) >> 2;
4147 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4148 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4149 dmae->comp_val = DMAE_COMP_VAL;
4154 static void bnx2x_func_stats_init(struct bnx2x *bp)
4156 struct dmae_command *dmae = &bp->stats_dmae;
4157 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4160 if (!bp->func_stx) {
4161 BNX2X_ERR("BUG!\n");
4165 bp->executer_idx = 0;
4166 memset(dmae, 0, sizeof(struct dmae_command));
4168 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4169 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4170 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4172 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4174 DMAE_CMD_ENDIANITY_DW_SWAP |
4176 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4177 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4178 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4179 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4180 dmae->dst_addr_lo = bp->func_stx >> 2;
4181 dmae->dst_addr_hi = 0;
4182 dmae->len = sizeof(struct host_func_stats) >> 2;
4183 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4184 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4185 dmae->comp_val = DMAE_COMP_VAL;
4190 static void bnx2x_stats_start(struct bnx2x *bp)
4193 bnx2x_port_stats_init(bp);
4195 else if (bp->func_stx)
4196 bnx2x_func_stats_init(bp);
4198 bnx2x_hw_stats_post(bp);
4199 bnx2x_storm_stats_post(bp);
4202 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4204 bnx2x_stats_comp(bp);
4205 bnx2x_stats_pmf_update(bp);
4206 bnx2x_stats_start(bp);
4209 static void bnx2x_stats_restart(struct bnx2x *bp)
4211 bnx2x_stats_comp(bp);
4212 bnx2x_stats_start(bp);
4215 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4217 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4218 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4219 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4225 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4226 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4227 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4228 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4229 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4230 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4231 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4232 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4233 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4234 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4235 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4236 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4237 UPDATE_STAT64(tx_stat_gt127,
4238 tx_stat_etherstatspkts65octetsto127octets);
4239 UPDATE_STAT64(tx_stat_gt255,
4240 tx_stat_etherstatspkts128octetsto255octets);
4241 UPDATE_STAT64(tx_stat_gt511,
4242 tx_stat_etherstatspkts256octetsto511octets);
4243 UPDATE_STAT64(tx_stat_gt1023,
4244 tx_stat_etherstatspkts512octetsto1023octets);
4245 UPDATE_STAT64(tx_stat_gt1518,
4246 tx_stat_etherstatspkts1024octetsto1522octets);
4247 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4248 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4249 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4250 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4251 UPDATE_STAT64(tx_stat_gterr,
4252 tx_stat_dot3statsinternalmactransmiterrors);
4253 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4255 estats->pause_frames_received_hi =
4256 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4257 estats->pause_frames_received_lo =
4258 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4260 estats->pause_frames_sent_hi =
4261 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4262 estats->pause_frames_sent_lo =
4263 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4266 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4268 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4269 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4270 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4272 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4273 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4274 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4275 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4276 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4277 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4278 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4279 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4280 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4281 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4282 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4283 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4284 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4285 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4286 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4287 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4288 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4289 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4290 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4291 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4292 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4293 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4294 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4295 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4296 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4297 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4298 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4299 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4300 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4301 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4302 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4304 estats->pause_frames_received_hi =
4305 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4306 estats->pause_frames_received_lo =
4307 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4308 ADD_64(estats->pause_frames_received_hi,
4309 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4310 estats->pause_frames_received_lo,
4311 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4313 estats->pause_frames_sent_hi =
4314 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4315 estats->pause_frames_sent_lo =
4316 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4317 ADD_64(estats->pause_frames_sent_hi,
4318 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4319 estats->pause_frames_sent_lo,
4320 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4323 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4325 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4326 struct nig_stats *old = &(bp->port.old_nig_stats);
4327 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4328 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4334 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4335 bnx2x_bmac_stats_update(bp);
4337 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4338 bnx2x_emac_stats_update(bp);
4340 else { /* unreached */
4341 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4345 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4346 new->brb_discard - old->brb_discard);
4347 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4348 new->brb_truncate - old->brb_truncate);
4350 UPDATE_STAT64_NIG(egress_mac_pkt0,
4351 etherstatspkts1024octetsto1522octets);
4352 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4354 memcpy(old, new, sizeof(struct nig_stats));
4356 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4357 sizeof(struct mac_stx));
4358 estats->brb_drop_hi = pstats->brb_drop_hi;
4359 estats->brb_drop_lo = pstats->brb_drop_lo;
4361 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4363 if (!BP_NOMCP(bp)) {
4365 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4366 if (nig_timer_max != estats->nig_timer_max) {
4367 estats->nig_timer_max = nig_timer_max;
4368 BNX2X_ERR("NIG timer max (%u)\n",
4369 estats->nig_timer_max);
4376 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4378 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4379 struct tstorm_per_port_stats *tport =
4380 &stats->tstorm_common.port_statistics;
4381 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4382 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4385 memcpy(&(fstats->total_bytes_received_hi),
4386 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4387 sizeof(struct host_func_stats) - 2*sizeof(u32));
4388 estats->error_bytes_received_hi = 0;
4389 estats->error_bytes_received_lo = 0;
4390 estats->etherstatsoverrsizepkts_hi = 0;
4391 estats->etherstatsoverrsizepkts_lo = 0;
4392 estats->no_buff_discard_hi = 0;
4393 estats->no_buff_discard_lo = 0;
4395 for_each_queue(bp, i) {
4396 struct bnx2x_fastpath *fp = &bp->fp[i];
4397 int cl_id = fp->cl_id;
4398 struct tstorm_per_client_stats *tclient =
4399 &stats->tstorm_common.client_statistics[cl_id];
4400 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4401 struct ustorm_per_client_stats *uclient =
4402 &stats->ustorm_common.client_statistics[cl_id];
4403 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4404 struct xstorm_per_client_stats *xclient =
4405 &stats->xstorm_common.client_statistics[cl_id];
4406 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4407 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4410 /* are storm stats valid? */
4411 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4412 bp->stats_counter) {
4413 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4414 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4415 i, xclient->stats_counter, bp->stats_counter);
4418 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4419 bp->stats_counter) {
4420 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4421 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4422 i, tclient->stats_counter, bp->stats_counter);
4425 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4426 bp->stats_counter) {
4427 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4428 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4429 i, uclient->stats_counter, bp->stats_counter);
4433 qstats->total_bytes_received_hi =
4434 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4435 qstats->total_bytes_received_lo =
4436 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4438 ADD_64(qstats->total_bytes_received_hi,
4439 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4440 qstats->total_bytes_received_lo,
4441 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4443 ADD_64(qstats->total_bytes_received_hi,
4444 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4445 qstats->total_bytes_received_lo,
4446 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4448 SUB_64(qstats->total_bytes_received_hi,
4449 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4450 qstats->total_bytes_received_lo,
4451 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4453 SUB_64(qstats->total_bytes_received_hi,
4454 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4455 qstats->total_bytes_received_lo,
4456 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4458 SUB_64(qstats->total_bytes_received_hi,
4459 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4460 qstats->total_bytes_received_lo,
4461 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4463 qstats->valid_bytes_received_hi =
4464 qstats->total_bytes_received_hi;
4465 qstats->valid_bytes_received_lo =
4466 qstats->total_bytes_received_lo;
4468 qstats->error_bytes_received_hi =
4469 le32_to_cpu(tclient->rcv_error_bytes.hi);
4470 qstats->error_bytes_received_lo =
4471 le32_to_cpu(tclient->rcv_error_bytes.lo);
4473 ADD_64(qstats->total_bytes_received_hi,
4474 qstats->error_bytes_received_hi,
4475 qstats->total_bytes_received_lo,
4476 qstats->error_bytes_received_lo);
4478 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4479 total_unicast_packets_received);
4480 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4481 total_multicast_packets_received);
4482 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4483 total_broadcast_packets_received);
4484 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4485 etherstatsoverrsizepkts);
4486 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4488 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4489 total_unicast_packets_received);
4490 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4491 total_multicast_packets_received);
4492 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4493 total_broadcast_packets_received);
4494 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4495 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4496 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4498 qstats->total_bytes_transmitted_hi =
4499 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4500 qstats->total_bytes_transmitted_lo =
4501 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4503 ADD_64(qstats->total_bytes_transmitted_hi,
4504 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4505 qstats->total_bytes_transmitted_lo,
4506 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4508 ADD_64(qstats->total_bytes_transmitted_hi,
4509 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4510 qstats->total_bytes_transmitted_lo,
4511 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4513 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4514 total_unicast_packets_transmitted);
4515 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4516 total_multicast_packets_transmitted);
4517 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4518 total_broadcast_packets_transmitted);
4520 old_tclient->checksum_discard = tclient->checksum_discard;
4521 old_tclient->ttl0_discard = tclient->ttl0_discard;
4523 ADD_64(fstats->total_bytes_received_hi,
4524 qstats->total_bytes_received_hi,
4525 fstats->total_bytes_received_lo,
4526 qstats->total_bytes_received_lo);
4527 ADD_64(fstats->total_bytes_transmitted_hi,
4528 qstats->total_bytes_transmitted_hi,
4529 fstats->total_bytes_transmitted_lo,
4530 qstats->total_bytes_transmitted_lo);
4531 ADD_64(fstats->total_unicast_packets_received_hi,
4532 qstats->total_unicast_packets_received_hi,
4533 fstats->total_unicast_packets_received_lo,
4534 qstats->total_unicast_packets_received_lo);
4535 ADD_64(fstats->total_multicast_packets_received_hi,
4536 qstats->total_multicast_packets_received_hi,
4537 fstats->total_multicast_packets_received_lo,
4538 qstats->total_multicast_packets_received_lo);
4539 ADD_64(fstats->total_broadcast_packets_received_hi,
4540 qstats->total_broadcast_packets_received_hi,
4541 fstats->total_broadcast_packets_received_lo,
4542 qstats->total_broadcast_packets_received_lo);
4543 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4544 qstats->total_unicast_packets_transmitted_hi,
4545 fstats->total_unicast_packets_transmitted_lo,
4546 qstats->total_unicast_packets_transmitted_lo);
4547 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4548 qstats->total_multicast_packets_transmitted_hi,
4549 fstats->total_multicast_packets_transmitted_lo,
4550 qstats->total_multicast_packets_transmitted_lo);
4551 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4552 qstats->total_broadcast_packets_transmitted_hi,
4553 fstats->total_broadcast_packets_transmitted_lo,
4554 qstats->total_broadcast_packets_transmitted_lo);
4555 ADD_64(fstats->valid_bytes_received_hi,
4556 qstats->valid_bytes_received_hi,
4557 fstats->valid_bytes_received_lo,
4558 qstats->valid_bytes_received_lo);
4560 ADD_64(estats->error_bytes_received_hi,
4561 qstats->error_bytes_received_hi,
4562 estats->error_bytes_received_lo,
4563 qstats->error_bytes_received_lo);
4564 ADD_64(estats->etherstatsoverrsizepkts_hi,
4565 qstats->etherstatsoverrsizepkts_hi,
4566 estats->etherstatsoverrsizepkts_lo,
4567 qstats->etherstatsoverrsizepkts_lo);
4568 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4569 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4572 ADD_64(fstats->total_bytes_received_hi,
4573 estats->rx_stat_ifhcinbadoctets_hi,
4574 fstats->total_bytes_received_lo,
4575 estats->rx_stat_ifhcinbadoctets_lo);
4577 memcpy(estats, &(fstats->total_bytes_received_hi),
4578 sizeof(struct host_func_stats) - 2*sizeof(u32));
4580 ADD_64(estats->etherstatsoverrsizepkts_hi,
4581 estats->rx_stat_dot3statsframestoolong_hi,
4582 estats->etherstatsoverrsizepkts_lo,
4583 estats->rx_stat_dot3statsframestoolong_lo);
4584 ADD_64(estats->error_bytes_received_hi,
4585 estats->rx_stat_ifhcinbadoctets_hi,
4586 estats->error_bytes_received_lo,
4587 estats->rx_stat_ifhcinbadoctets_lo);
4590 estats->mac_filter_discard =
4591 le32_to_cpu(tport->mac_filter_discard);
4592 estats->xxoverflow_discard =
4593 le32_to_cpu(tport->xxoverflow_discard);
4594 estats->brb_truncate_discard =
4595 le32_to_cpu(tport->brb_truncate_discard);
4596 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4599 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4601 bp->stats_pending = 0;
4606 static void bnx2x_net_stats_update(struct bnx2x *bp)
4608 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4609 struct net_device_stats *nstats = &bp->dev->stats;
4612 nstats->rx_packets =
4613 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4614 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4615 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4617 nstats->tx_packets =
4618 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4619 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4620 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4622 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4624 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4626 nstats->rx_dropped = estats->mac_discard;
4627 for_each_queue(bp, i)
4628 nstats->rx_dropped +=
4629 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4631 nstats->tx_dropped = 0;
4634 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4636 nstats->collisions =
4637 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4639 nstats->rx_length_errors =
4640 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4641 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4642 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4643 bnx2x_hilo(&estats->brb_truncate_hi);
4644 nstats->rx_crc_errors =
4645 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4646 nstats->rx_frame_errors =
4647 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4648 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4649 nstats->rx_missed_errors = estats->xxoverflow_discard;
4651 nstats->rx_errors = nstats->rx_length_errors +
4652 nstats->rx_over_errors +
4653 nstats->rx_crc_errors +
4654 nstats->rx_frame_errors +
4655 nstats->rx_fifo_errors +
4656 nstats->rx_missed_errors;
4658 nstats->tx_aborted_errors =
4659 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4660 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4661 nstats->tx_carrier_errors =
4662 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4663 nstats->tx_fifo_errors = 0;
4664 nstats->tx_heartbeat_errors = 0;
4665 nstats->tx_window_errors = 0;
4667 nstats->tx_errors = nstats->tx_aborted_errors +
4668 nstats->tx_carrier_errors +
4669 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4672 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4674 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4677 estats->driver_xoff = 0;
4678 estats->rx_err_discard_pkt = 0;
4679 estats->rx_skb_alloc_failed = 0;
4680 estats->hw_csum_err = 0;
4681 for_each_queue(bp, i) {
4682 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4684 estats->driver_xoff += qstats->driver_xoff;
4685 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4686 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4687 estats->hw_csum_err += qstats->hw_csum_err;
4691 static void bnx2x_stats_update(struct bnx2x *bp)
4693 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4695 if (*stats_comp != DMAE_COMP_VAL)
4699 bnx2x_hw_stats_update(bp);
4701 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4702 BNX2X_ERR("storm stats were not updated for 3 times\n");
4707 bnx2x_net_stats_update(bp);
4708 bnx2x_drv_stats_update(bp);
4710 if (netif_msg_timer(bp)) {
4711 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4714 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4716 estats->brb_drop_lo, estats->brb_truncate_lo);
4718 for_each_queue(bp, i) {
4719 struct bnx2x_fastpath *fp = &bp->fp[i];
4720 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4722 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4723 " rx pkt(%lu) rx calls(%lu %lu)\n",
4724 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4726 le16_to_cpu(*fp->rx_cons_sb),
4727 bnx2x_hilo(&qstats->
4728 total_unicast_packets_received_hi),
4729 fp->rx_calls, fp->rx_pkt);
4732 for_each_queue(bp, i) {
4733 struct bnx2x_fastpath *fp = &bp->fp[i];
4734 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4735 struct netdev_queue *txq =
4736 netdev_get_tx_queue(bp->dev, i);
4738 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4739 " tx pkt(%lu) tx calls (%lu)"
4740 " %s (Xoff events %u)\n",
4741 fp->name, bnx2x_tx_avail(fp),
4742 le16_to_cpu(*fp->tx_cons_sb),
4743 bnx2x_hilo(&qstats->
4744 total_unicast_packets_transmitted_hi),
4746 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4747 qstats->driver_xoff);
4751 bnx2x_hw_stats_post(bp);
4752 bnx2x_storm_stats_post(bp);
4755 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4757 struct dmae_command *dmae;
4759 int loader_idx = PMF_DMAE_C(bp);
4760 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4762 bp->executer_idx = 0;
4764 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4766 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4768 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4770 DMAE_CMD_ENDIANITY_DW_SWAP |
4772 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4773 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4775 if (bp->port.port_stx) {
4777 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4779 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4781 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4782 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4783 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4784 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4785 dmae->dst_addr_hi = 0;
4786 dmae->len = sizeof(struct host_port_stats) >> 2;
4788 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4789 dmae->comp_addr_hi = 0;
4792 dmae->comp_addr_lo =
4793 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4794 dmae->comp_addr_hi =
4795 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4796 dmae->comp_val = DMAE_COMP_VAL;
4804 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4805 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4806 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4807 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4808 dmae->dst_addr_lo = bp->func_stx >> 2;
4809 dmae->dst_addr_hi = 0;
4810 dmae->len = sizeof(struct host_func_stats) >> 2;
4811 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4812 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4813 dmae->comp_val = DMAE_COMP_VAL;
4819 static void bnx2x_stats_stop(struct bnx2x *bp)
4823 bnx2x_stats_comp(bp);
4826 update = (bnx2x_hw_stats_update(bp) == 0);
4828 update |= (bnx2x_storm_stats_update(bp) == 0);
4831 bnx2x_net_stats_update(bp);
4834 bnx2x_port_stats_stop(bp);
4836 bnx2x_hw_stats_post(bp);
4837 bnx2x_stats_comp(bp);
4841 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4845 static const struct {
4846 void (*action)(struct bnx2x *bp);
4847 enum bnx2x_stats_state next_state;
4848 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4851 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4852 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4853 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4854 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4857 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4858 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4859 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4860 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4864 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4866 enum bnx2x_stats_state state = bp->stats_state;
4868 if (unlikely(bp->panic))
4871 bnx2x_stats_stm[state][event].action(bp);
4872 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4874 /* Make sure the state has been "changed" */
4877 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4878 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4879 state, event, bp->stats_state);
4882 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4884 struct dmae_command *dmae;
4885 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4888 if (!bp->port.pmf || !bp->port.port_stx) {
4889 BNX2X_ERR("BUG!\n");
4893 bp->executer_idx = 0;
4895 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4896 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4897 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4898 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4900 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4902 DMAE_CMD_ENDIANITY_DW_SWAP |
4904 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4905 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4906 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4907 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4908 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4909 dmae->dst_addr_hi = 0;
4910 dmae->len = sizeof(struct host_port_stats) >> 2;
4911 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4912 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4913 dmae->comp_val = DMAE_COMP_VAL;
4916 bnx2x_hw_stats_post(bp);
4917 bnx2x_stats_comp(bp);
4920 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4922 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4923 int port = BP_PORT(bp);
4928 if (!bp->port.pmf || !bp->func_stx) {
4929 BNX2X_ERR("BUG!\n");
4933 /* save our func_stx */
4934 func_stx = bp->func_stx;
4936 for (vn = VN_0; vn < vn_max; vn++) {
4939 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4940 bnx2x_func_stats_init(bp);
4941 bnx2x_hw_stats_post(bp);
4942 bnx2x_stats_comp(bp);
4945 /* restore our func_stx */
4946 bp->func_stx = func_stx;
4949 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4951 struct dmae_command *dmae = &bp->stats_dmae;
4952 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4955 if (!bp->func_stx) {
4956 BNX2X_ERR("BUG!\n");
4960 bp->executer_idx = 0;
4961 memset(dmae, 0, sizeof(struct dmae_command));
4963 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4964 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4965 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4967 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4969 DMAE_CMD_ENDIANITY_DW_SWAP |
4971 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4972 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4973 dmae->src_addr_lo = bp->func_stx >> 2;
4974 dmae->src_addr_hi = 0;
4975 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4976 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4977 dmae->len = sizeof(struct host_func_stats) >> 2;
4978 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4979 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4980 dmae->comp_val = DMAE_COMP_VAL;
4983 bnx2x_hw_stats_post(bp);
4984 bnx2x_stats_comp(bp);
4987 static void bnx2x_stats_init(struct bnx2x *bp)
4989 int port = BP_PORT(bp);
4990 int func = BP_FUNC(bp);
4993 bp->stats_pending = 0;
4994 bp->executer_idx = 0;
4995 bp->stats_counter = 0;
4997 /* port and func stats for management */
4998 if (!BP_NOMCP(bp)) {
4999 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
5000 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5003 bp->port.port_stx = 0;
5006 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
5007 bp->port.port_stx, bp->func_stx);
5010 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
5011 bp->port.old_nig_stats.brb_discard =
5012 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
5013 bp->port.old_nig_stats.brb_truncate =
5014 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5015 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5016 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5017 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5018 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5020 /* function stats */
5021 for_each_queue(bp, i) {
5022 struct bnx2x_fastpath *fp = &bp->fp[i];
5024 memset(&fp->old_tclient, 0,
5025 sizeof(struct tstorm_per_client_stats));
5026 memset(&fp->old_uclient, 0,
5027 sizeof(struct ustorm_per_client_stats));
5028 memset(&fp->old_xclient, 0,
5029 sizeof(struct xstorm_per_client_stats));
5030 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5033 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5034 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5036 bp->stats_state = STATS_STATE_DISABLED;
5039 if (bp->port.port_stx)
5040 bnx2x_port_stats_base_init(bp);
5043 bnx2x_func_stats_base_init(bp);
5045 } else if (bp->func_stx)
5046 bnx2x_func_stats_base_update(bp);
5049 static void bnx2x_timer(unsigned long data)
5051 struct bnx2x *bp = (struct bnx2x *) data;
5053 if (!netif_running(bp->dev))
5056 if (atomic_read(&bp->intr_sem) != 0)
5060 struct bnx2x_fastpath *fp = &bp->fp[0];
5064 rc = bnx2x_rx_int(fp, 1000);
5067 if (!BP_NOMCP(bp)) {
5068 int func = BP_FUNC(bp);
5072 ++bp->fw_drv_pulse_wr_seq;
5073 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5074 /* TBD - add SYSTEM_TIME */
5075 drv_pulse = bp->fw_drv_pulse_wr_seq;
5076 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5078 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5079 MCP_PULSE_SEQ_MASK);
5080 /* The delta between driver pulse and mcp response
5081 * should be 1 (before mcp response) or 0 (after mcp response)
5083 if ((drv_pulse != mcp_pulse) &&
5084 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5085 /* someone lost a heartbeat... */
5086 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5087 drv_pulse, mcp_pulse);
5091 if (bp->state == BNX2X_STATE_OPEN)
5092 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5095 mod_timer(&bp->timer, jiffies + bp->current_interval);
5098 /* end of Statistics */
5103 * nic init service functions
5106 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5108 int port = BP_PORT(bp);
5111 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5112 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5113 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5114 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5115 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5116 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5119 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5120 dma_addr_t mapping, int sb_id)
5122 int port = BP_PORT(bp);
5123 int func = BP_FUNC(bp);
5128 section = ((u64)mapping) + offsetof(struct host_status_block,
5130 sb->u_status_block.status_block_id = sb_id;
5132 REG_WR(bp, BAR_CSTRORM_INTMEM +
5133 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5134 REG_WR(bp, BAR_CSTRORM_INTMEM +
5135 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5137 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5138 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5140 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5141 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5142 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5145 section = ((u64)mapping) + offsetof(struct host_status_block,
5147 sb->c_status_block.status_block_id = sb_id;
5149 REG_WR(bp, BAR_CSTRORM_INTMEM +
5150 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5151 REG_WR(bp, BAR_CSTRORM_INTMEM +
5152 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5154 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5155 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5157 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5158 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5159 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5161 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5164 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5166 int func = BP_FUNC(bp);
5168 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5169 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5170 sizeof(struct tstorm_def_status_block)/4);
5171 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5172 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5173 sizeof(struct cstorm_def_status_block_u)/4);
5174 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5175 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5176 sizeof(struct cstorm_def_status_block_c)/4);
5177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5178 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5179 sizeof(struct xstorm_def_status_block)/4);
5182 static void bnx2x_init_def_sb(struct bnx2x *bp,
5183 struct host_def_status_block *def_sb,
5184 dma_addr_t mapping, int sb_id)
5186 int port = BP_PORT(bp);
5187 int func = BP_FUNC(bp);
5188 int index, val, reg_offset;
5192 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5193 atten_status_block);
5194 def_sb->atten_status_block.status_block_id = sb_id;
5198 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5199 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5201 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5202 bp->attn_group[index].sig[0] = REG_RD(bp,
5203 reg_offset + 0x10*index);
5204 bp->attn_group[index].sig[1] = REG_RD(bp,
5205 reg_offset + 0x4 + 0x10*index);
5206 bp->attn_group[index].sig[2] = REG_RD(bp,
5207 reg_offset + 0x8 + 0x10*index);
5208 bp->attn_group[index].sig[3] = REG_RD(bp,
5209 reg_offset + 0xc + 0x10*index);
5212 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5213 HC_REG_ATTN_MSG0_ADDR_L);
5215 REG_WR(bp, reg_offset, U64_LO(section));
5216 REG_WR(bp, reg_offset + 4, U64_HI(section));
5218 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5220 val = REG_RD(bp, reg_offset);
5222 REG_WR(bp, reg_offset, val);
5225 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5226 u_def_status_block);
5227 def_sb->u_def_status_block.status_block_id = sb_id;
5229 REG_WR(bp, BAR_CSTRORM_INTMEM +
5230 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5231 REG_WR(bp, BAR_CSTRORM_INTMEM +
5232 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5234 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5235 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5237 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5238 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5239 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5242 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5243 c_def_status_block);
5244 def_sb->c_def_status_block.status_block_id = sb_id;
5246 REG_WR(bp, BAR_CSTRORM_INTMEM +
5247 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5248 REG_WR(bp, BAR_CSTRORM_INTMEM +
5249 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5251 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5252 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5254 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5255 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5256 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5259 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5260 t_def_status_block);
5261 def_sb->t_def_status_block.status_block_id = sb_id;
5263 REG_WR(bp, BAR_TSTRORM_INTMEM +
5264 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5265 REG_WR(bp, BAR_TSTRORM_INTMEM +
5266 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5268 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5269 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5271 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5272 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5273 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5276 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5277 x_def_status_block);
5278 def_sb->x_def_status_block.status_block_id = sb_id;
5280 REG_WR(bp, BAR_XSTRORM_INTMEM +
5281 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5282 REG_WR(bp, BAR_XSTRORM_INTMEM +
5283 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5285 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5286 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5288 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5289 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5290 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5292 bp->stats_pending = 0;
5293 bp->set_mac_pending = 0;
5295 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5298 static void bnx2x_update_coalesce(struct bnx2x *bp)
5300 int port = BP_PORT(bp);
5303 for_each_queue(bp, i) {
5304 int sb_id = bp->fp[i].sb_id;
5306 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5307 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5308 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5309 U_SB_ETH_RX_CQ_INDEX),
5310 bp->rx_ticks/(4 * BNX2X_BTR));
5311 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5312 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5313 U_SB_ETH_RX_CQ_INDEX),
5314 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5316 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5317 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5318 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5319 C_SB_ETH_TX_CQ_INDEX),
5320 bp->tx_ticks/(4 * BNX2X_BTR));
5321 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5322 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5323 C_SB_ETH_TX_CQ_INDEX),
5324 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5328 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5329 struct bnx2x_fastpath *fp, int last)
5333 for (i = 0; i < last; i++) {
5334 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5335 struct sk_buff *skb = rx_buf->skb;
5338 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5342 if (fp->tpa_state[i] == BNX2X_TPA_START)
5343 dma_unmap_single(&bp->pdev->dev,
5344 dma_unmap_addr(rx_buf, mapping),
5345 bp->rx_buf_size, DMA_FROM_DEVICE);
5352 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5354 int func = BP_FUNC(bp);
5355 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5356 ETH_MAX_AGGREGATION_QUEUES_E1H;
5357 u16 ring_prod, cqe_ring_prod;
5360 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5362 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5364 if (bp->flags & TPA_ENABLE_FLAG) {
5366 for_each_queue(bp, j) {
5367 struct bnx2x_fastpath *fp = &bp->fp[j];
5369 for (i = 0; i < max_agg_queues; i++) {
5370 fp->tpa_pool[i].skb =
5371 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5372 if (!fp->tpa_pool[i].skb) {
5373 BNX2X_ERR("Failed to allocate TPA "
5374 "skb pool for queue[%d] - "
5375 "disabling TPA on this "
5377 bnx2x_free_tpa_pool(bp, fp, i);
5378 fp->disable_tpa = 1;
5381 dma_unmap_addr_set((struct sw_rx_bd *)
5382 &bp->fp->tpa_pool[i],
5384 fp->tpa_state[i] = BNX2X_TPA_STOP;
5389 for_each_queue(bp, j) {
5390 struct bnx2x_fastpath *fp = &bp->fp[j];
5393 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5394 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5396 /* "next page" elements initialization */
5398 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5399 struct eth_rx_sge *sge;
5401 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5403 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5404 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5406 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5407 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5410 bnx2x_init_sge_ring_bit_mask(fp);
5413 for (i = 1; i <= NUM_RX_RINGS; i++) {
5414 struct eth_rx_bd *rx_bd;
5416 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5418 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5419 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5421 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5422 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5427 struct eth_rx_cqe_next_page *nextpg;
5429 nextpg = (struct eth_rx_cqe_next_page *)
5430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5439 /* Allocate SGEs and initialize the ring elements */
5440 for (i = 0, ring_prod = 0;
5441 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5443 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5444 BNX2X_ERR("was only able to allocate "
5446 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5447 /* Cleanup already allocated elements */
5448 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5449 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5450 fp->disable_tpa = 1;
5454 ring_prod = NEXT_SGE_IDX(ring_prod);
5456 fp->rx_sge_prod = ring_prod;
5458 /* Allocate BDs and initialize BD ring */
5459 fp->rx_comp_cons = 0;
5460 cqe_ring_prod = ring_prod = 0;
5461 for (i = 0; i < bp->rx_ring_size; i++) {
5462 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5463 BNX2X_ERR("was only able to allocate "
5464 "%d rx skbs on queue[%d]\n", i, j);
5465 fp->eth_q_stats.rx_skb_alloc_failed++;
5468 ring_prod = NEXT_RX_IDX(ring_prod);
5469 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5470 WARN_ON(ring_prod <= i);
5473 fp->rx_bd_prod = ring_prod;
5474 /* must not have more available CQEs than BDs */
5475 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5477 fp->rx_pkt = fp->rx_calls = 0;
5480 * this will generate an interrupt (to the TSTORM)
5481 * must only be done after chip is initialized
5483 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
5489 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5490 U64_LO(fp->rx_comp_mapping));
5491 REG_WR(bp, BAR_USTRORM_INTMEM +
5492 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5493 U64_HI(fp->rx_comp_mapping));
5497 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5501 for_each_queue(bp, j) {
5502 struct bnx2x_fastpath *fp = &bp->fp[j];
5504 for (i = 1; i <= NUM_TX_RINGS; i++) {
5505 struct eth_tx_next_bd *tx_next_bd =
5506 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5508 tx_next_bd->addr_hi =
5509 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5510 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5511 tx_next_bd->addr_lo =
5512 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5513 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5516 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5517 fp->tx_db.data.zero_fill1 = 0;
5518 fp->tx_db.data.prod = 0;
5520 fp->tx_pkt_prod = 0;
5521 fp->tx_pkt_cons = 0;
5524 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5529 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5531 int func = BP_FUNC(bp);
5533 spin_lock_init(&bp->spq_lock);
5535 bp->spq_left = MAX_SPQ_PENDING;
5536 bp->spq_prod_idx = 0;
5537 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5538 bp->spq_prod_bd = bp->spq;
5539 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5541 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5542 U64_LO(bp->spq_mapping));
5544 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5545 U64_HI(bp->spq_mapping));
5547 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5551 static void bnx2x_init_context(struct bnx2x *bp)
5556 for_each_queue(bp, i) {
5557 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5558 struct bnx2x_fastpath *fp = &bp->fp[i];
5559 u8 cl_id = fp->cl_id;
5561 context->ustorm_st_context.common.sb_index_numbers =
5562 BNX2X_RX_SB_INDEX_NUM;
5563 context->ustorm_st_context.common.clientId = cl_id;
5564 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5565 context->ustorm_st_context.common.flags =
5566 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5567 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5568 context->ustorm_st_context.common.statistics_counter_id =
5570 context->ustorm_st_context.common.mc_alignment_log_size =
5571 BNX2X_RX_ALIGN_SHIFT;
5572 context->ustorm_st_context.common.bd_buff_size =
5574 context->ustorm_st_context.common.bd_page_base_hi =
5575 U64_HI(fp->rx_desc_mapping);
5576 context->ustorm_st_context.common.bd_page_base_lo =
5577 U64_LO(fp->rx_desc_mapping);
5578 if (!fp->disable_tpa) {
5579 context->ustorm_st_context.common.flags |=
5580 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5581 context->ustorm_st_context.common.sge_buff_size =
5582 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5584 context->ustorm_st_context.common.sge_page_base_hi =
5585 U64_HI(fp->rx_sge_mapping);
5586 context->ustorm_st_context.common.sge_page_base_lo =
5587 U64_LO(fp->rx_sge_mapping);
5589 context->ustorm_st_context.common.max_sges_for_packet =
5590 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5591 context->ustorm_st_context.common.max_sges_for_packet =
5592 ((context->ustorm_st_context.common.
5593 max_sges_for_packet + PAGES_PER_SGE - 1) &
5594 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5597 context->ustorm_ag_context.cdu_usage =
5598 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5599 CDU_REGION_NUMBER_UCM_AG,
5600 ETH_CONNECTION_TYPE);
5602 context->xstorm_ag_context.cdu_reserved =
5603 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5604 CDU_REGION_NUMBER_XCM_AG,
5605 ETH_CONNECTION_TYPE);
5609 for_each_queue(bp, i) {
5610 struct bnx2x_fastpath *fp = &bp->fp[i];
5611 struct eth_context *context =
5612 bnx2x_sp(bp, context[i].eth);
5614 context->cstorm_st_context.sb_index_number =
5615 C_SB_ETH_TX_CQ_INDEX;
5616 context->cstorm_st_context.status_block_id = fp->sb_id;
5618 context->xstorm_st_context.tx_bd_page_base_hi =
5619 U64_HI(fp->tx_desc_mapping);
5620 context->xstorm_st_context.tx_bd_page_base_lo =
5621 U64_LO(fp->tx_desc_mapping);
5622 context->xstorm_st_context.statistics_data = (fp->cl_id |
5623 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5627 static void bnx2x_init_ind_table(struct bnx2x *bp)
5629 int func = BP_FUNC(bp);
5632 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5636 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5637 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5638 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5639 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5640 bp->fp->cl_id + (i % bp->num_queues));
5643 static void bnx2x_set_client_config(struct bnx2x *bp)
5645 struct tstorm_eth_client_config tstorm_client = {0};
5646 int port = BP_PORT(bp);
5649 tstorm_client.mtu = bp->dev->mtu;
5650 tstorm_client.config_flags =
5651 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5652 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5654 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5655 tstorm_client.config_flags |=
5656 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5657 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5661 for_each_queue(bp, i) {
5662 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5664 REG_WR(bp, BAR_TSTRORM_INTMEM +
5665 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5666 ((u32 *)&tstorm_client)[0]);
5667 REG_WR(bp, BAR_TSTRORM_INTMEM +
5668 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5669 ((u32 *)&tstorm_client)[1]);
5672 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5673 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5676 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5678 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5679 int mode = bp->rx_mode;
5680 int mask = bp->rx_mode_cl_mask;
5681 int func = BP_FUNC(bp);
5682 int port = BP_PORT(bp);
5684 /* All but management unicast packets should pass to the host as well */
5686 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5687 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5688 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5689 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5691 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5694 case BNX2X_RX_MODE_NONE: /* no Rx */
5695 tstorm_mac_filter.ucast_drop_all = mask;
5696 tstorm_mac_filter.mcast_drop_all = mask;
5697 tstorm_mac_filter.bcast_drop_all = mask;
5700 case BNX2X_RX_MODE_NORMAL:
5701 tstorm_mac_filter.bcast_accept_all = mask;
5704 case BNX2X_RX_MODE_ALLMULTI:
5705 tstorm_mac_filter.mcast_accept_all = mask;
5706 tstorm_mac_filter.bcast_accept_all = mask;
5709 case BNX2X_RX_MODE_PROMISC:
5710 tstorm_mac_filter.ucast_accept_all = mask;
5711 tstorm_mac_filter.mcast_accept_all = mask;
5712 tstorm_mac_filter.bcast_accept_all = mask;
5713 /* pass management unicast packets as well */
5714 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5718 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5723 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5726 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5727 REG_WR(bp, BAR_TSTRORM_INTMEM +
5728 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5729 ((u32 *)&tstorm_mac_filter)[i]);
5731 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5732 ((u32 *)&tstorm_mac_filter)[i]); */
5735 if (mode != BNX2X_RX_MODE_NONE)
5736 bnx2x_set_client_config(bp);
5739 static void bnx2x_init_internal_common(struct bnx2x *bp)
5743 /* Zero this manually as its initialization is
5744 currently missing in the initTool */
5745 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5746 REG_WR(bp, BAR_USTRORM_INTMEM +
5747 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5750 static void bnx2x_init_internal_port(struct bnx2x *bp)
5752 int port = BP_PORT(bp);
5755 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5757 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5758 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5759 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5762 static void bnx2x_init_internal_func(struct bnx2x *bp)
5764 struct tstorm_eth_function_common_config tstorm_config = {0};
5765 struct stats_indication_flags stats_flags = {0};
5766 int port = BP_PORT(bp);
5767 int func = BP_FUNC(bp);
5772 tstorm_config.config_flags = RSS_FLAGS(bp);
5775 tstorm_config.rss_result_mask = MULTI_MASK;
5777 /* Enable TPA if needed */
5778 if (bp->flags & TPA_ENABLE_FLAG)
5779 tstorm_config.config_flags |=
5780 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5783 tstorm_config.config_flags |=
5784 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5786 tstorm_config.leading_client_id = BP_L_ID(bp);
5788 REG_WR(bp, BAR_TSTRORM_INTMEM +
5789 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5790 (*(u32 *)&tstorm_config));
5792 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5793 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5794 bnx2x_set_storm_rx_mode(bp);
5796 for_each_queue(bp, i) {
5797 u8 cl_id = bp->fp[i].cl_id;
5799 /* reset xstorm per client statistics */
5800 offset = BAR_XSTRORM_INTMEM +
5801 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5803 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5804 REG_WR(bp, offset + j*4, 0);
5806 /* reset tstorm per client statistics */
5807 offset = BAR_TSTRORM_INTMEM +
5808 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5810 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5811 REG_WR(bp, offset + j*4, 0);
5813 /* reset ustorm per client statistics */
5814 offset = BAR_USTRORM_INTMEM +
5815 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5817 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5818 REG_WR(bp, offset + j*4, 0);
5821 /* Init statistics related context */
5822 stats_flags.collect_eth = 1;
5824 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5825 ((u32 *)&stats_flags)[0]);
5826 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5827 ((u32 *)&stats_flags)[1]);
5829 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5830 ((u32 *)&stats_flags)[0]);
5831 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5832 ((u32 *)&stats_flags)[1]);
5834 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5835 ((u32 *)&stats_flags)[0]);
5836 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5837 ((u32 *)&stats_flags)[1]);
5839 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5840 ((u32 *)&stats_flags)[0]);
5841 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5842 ((u32 *)&stats_flags)[1]);
5844 REG_WR(bp, BAR_XSTRORM_INTMEM +
5845 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5846 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5847 REG_WR(bp, BAR_XSTRORM_INTMEM +
5848 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5849 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5851 REG_WR(bp, BAR_TSTRORM_INTMEM +
5852 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5853 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5854 REG_WR(bp, BAR_TSTRORM_INTMEM +
5855 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5856 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5858 REG_WR(bp, BAR_USTRORM_INTMEM +
5859 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5860 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5861 REG_WR(bp, BAR_USTRORM_INTMEM +
5862 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5863 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5865 if (CHIP_IS_E1H(bp)) {
5866 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5868 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5870 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5872 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5875 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5879 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5880 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5881 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5882 for_each_queue(bp, i) {
5883 struct bnx2x_fastpath *fp = &bp->fp[i];
5885 REG_WR(bp, BAR_USTRORM_INTMEM +
5886 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5887 U64_LO(fp->rx_comp_mapping));
5888 REG_WR(bp, BAR_USTRORM_INTMEM +
5889 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5890 U64_HI(fp->rx_comp_mapping));
5893 REG_WR(bp, BAR_USTRORM_INTMEM +
5894 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5895 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5896 REG_WR(bp, BAR_USTRORM_INTMEM +
5897 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5898 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5900 REG_WR16(bp, BAR_USTRORM_INTMEM +
5901 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5905 /* dropless flow control */
5906 if (CHIP_IS_E1H(bp)) {
5907 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5909 rx_pause.bd_thr_low = 250;
5910 rx_pause.cqe_thr_low = 250;
5912 rx_pause.sge_thr_low = 0;
5913 rx_pause.bd_thr_high = 350;
5914 rx_pause.cqe_thr_high = 350;
5915 rx_pause.sge_thr_high = 0;
5917 for_each_queue(bp, i) {
5918 struct bnx2x_fastpath *fp = &bp->fp[i];
5920 if (!fp->disable_tpa) {
5921 rx_pause.sge_thr_low = 150;
5922 rx_pause.sge_thr_high = 250;
5926 offset = BAR_USTRORM_INTMEM +
5927 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5930 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5932 REG_WR(bp, offset + j*4,
5933 ((u32 *)&rx_pause)[j]);
5937 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5939 /* Init rate shaping and fairness contexts */
5943 /* During init there is no active link
5944 Until link is up, set link rate to 10Gbps */
5945 bp->link_vars.line_speed = SPEED_10000;
5946 bnx2x_init_port_minmax(bp);
5950 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5951 bnx2x_calc_vn_weight_sum(bp);
5953 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5954 bnx2x_init_vn_minmax(bp, 2*vn + port);
5956 /* Enable rate shaping and fairness */
5957 bp->cmng.flags.cmng_enables |=
5958 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5961 /* rate shaping and fairness are disabled */
5963 "single function mode minmax will be disabled\n");
5967 /* Store cmng structures to internal memory */
5969 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5970 REG_WR(bp, BAR_XSTRORM_INTMEM +
5971 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5972 ((u32 *)(&bp->cmng))[i]);
5975 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5977 switch (load_code) {
5978 case FW_MSG_CODE_DRV_LOAD_COMMON:
5979 bnx2x_init_internal_common(bp);
5982 case FW_MSG_CODE_DRV_LOAD_PORT:
5983 bnx2x_init_internal_port(bp);
5986 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5987 bnx2x_init_internal_func(bp);
5991 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5996 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
6000 for_each_queue(bp, i) {
6001 struct bnx2x_fastpath *fp = &bp->fp[i];
6004 fp->state = BNX2X_FP_STATE_CLOSED;
6006 fp->cl_id = BP_L_ID(bp) + i;
6008 fp->sb_id = fp->cl_id + 1;
6010 fp->sb_id = fp->cl_id;
6013 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
6014 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6015 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6017 bnx2x_update_fpsb_idx(fp);
6020 /* ensure status block indices were read */
6024 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6026 bnx2x_update_dsb_idx(bp);
6027 bnx2x_update_coalesce(bp);
6028 bnx2x_init_rx_rings(bp);
6029 bnx2x_init_tx_ring(bp);
6030 bnx2x_init_sp_ring(bp);
6031 bnx2x_init_context(bp);
6032 bnx2x_init_internal(bp, load_code);
6033 bnx2x_init_ind_table(bp);
6034 bnx2x_stats_init(bp);
6036 /* At this point, we are ready for interrupts */
6037 atomic_set(&bp->intr_sem, 0);
6039 /* flush all before enabling interrupts */
6043 bnx2x_int_enable(bp);
6045 /* Check for SPIO5 */
6046 bnx2x_attn_int_deasserted0(bp,
6047 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6048 AEU_INPUTS_ATTN_BITS_SPIO5);
6051 /* end of nic init */
6054 * gzip service functions
6057 static int bnx2x_gunzip_init(struct bnx2x *bp)
6059 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6060 &bp->gunzip_mapping, GFP_KERNEL);
6061 if (bp->gunzip_buf == NULL)
6064 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6065 if (bp->strm == NULL)
6068 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6070 if (bp->strm->workspace == NULL)
6080 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6081 bp->gunzip_mapping);
6082 bp->gunzip_buf = NULL;
6085 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6086 " un-compression\n");
6090 static void bnx2x_gunzip_end(struct bnx2x *bp)
6092 kfree(bp->strm->workspace);
6097 if (bp->gunzip_buf) {
6098 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6099 bp->gunzip_mapping);
6100 bp->gunzip_buf = NULL;
6104 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6108 /* check gzip header */
6109 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6110 BNX2X_ERR("Bad gzip header\n");
6118 if (zbuf[3] & FNAME)
6119 while ((zbuf[n++] != 0) && (n < len));
6121 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6122 bp->strm->avail_in = len - n;
6123 bp->strm->next_out = bp->gunzip_buf;
6124 bp->strm->avail_out = FW_BUF_SIZE;
6126 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6130 rc = zlib_inflate(bp->strm, Z_FINISH);
6131 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6132 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6135 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6136 if (bp->gunzip_outlen & 0x3)
6137 netdev_err(bp->dev, "Firmware decompression error:"
6138 " gunzip_outlen (%d) not aligned\n",
6140 bp->gunzip_outlen >>= 2;
6142 zlib_inflateEnd(bp->strm);
6144 if (rc == Z_STREAM_END)
6150 /* nic load/unload */
6153 * General service functions
6156 /* send a NIG loopback debug packet */
6157 static void bnx2x_lb_pckt(struct bnx2x *bp)
6161 /* Ethernet source and destination addresses */
6162 wb_write[0] = 0x55555555;
6163 wb_write[1] = 0x55555555;
6164 wb_write[2] = 0x20; /* SOP */
6165 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6167 /* NON-IP protocol */
6168 wb_write[0] = 0x09000000;
6169 wb_write[1] = 0x55555555;
6170 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6171 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6174 /* some of the internal memories
6175 * are not directly readable from the driver
6176 * to test them we send debug packets
6178 static int bnx2x_int_mem_test(struct bnx2x *bp)
6184 if (CHIP_REV_IS_FPGA(bp))
6186 else if (CHIP_REV_IS_EMUL(bp))
6191 DP(NETIF_MSG_HW, "start part1\n");
6193 /* Disable inputs of parser neighbor blocks */
6194 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6195 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6196 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6197 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6199 /* Write 0 to parser credits for CFC search request */
6200 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6202 /* send Ethernet packet */
6205 /* TODO do i reset NIG statistic? */
6206 /* Wait until NIG register shows 1 packet of size 0x10 */
6207 count = 1000 * factor;
6210 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6211 val = *bnx2x_sp(bp, wb_data[0]);
6219 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6223 /* Wait until PRS register shows 1 packet */
6224 count = 1000 * factor;
6226 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6234 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6238 /* Reset and init BRB, PRS */
6239 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6241 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6243 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6244 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6246 DP(NETIF_MSG_HW, "part2\n");
6248 /* Disable inputs of parser neighbor blocks */
6249 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6250 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6251 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6252 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6254 /* Write 0 to parser credits for CFC search request */
6255 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6257 /* send 10 Ethernet packets */
6258 for (i = 0; i < 10; i++)
6261 /* Wait until NIG register shows 10 + 1
6262 packets of size 11*0x10 = 0xb0 */
6263 count = 1000 * factor;
6266 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6267 val = *bnx2x_sp(bp, wb_data[0]);
6275 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6279 /* Wait until PRS register shows 2 packets */
6280 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6282 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6284 /* Write 1 to parser credits for CFC search request */
6285 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6287 /* Wait until PRS register shows 3 packets */
6288 msleep(10 * factor);
6289 /* Wait until NIG register shows 1 packet of size 0x10 */
6290 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6292 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6294 /* clear NIG EOP FIFO */
6295 for (i = 0; i < 11; i++)
6296 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6297 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6299 BNX2X_ERR("clear of NIG failed\n");
6303 /* Reset and init BRB, PRS, NIG */
6304 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6306 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6308 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6309 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6312 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6315 /* Enable inputs of parser neighbor blocks */
6316 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6317 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6318 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6319 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6321 DP(NETIF_MSG_HW, "done\n");
6326 static void enable_blocks_attention(struct bnx2x *bp)
6328 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6329 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6330 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6331 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6332 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6333 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6334 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6335 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6336 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6337 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6338 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6339 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6340 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6341 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6342 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6343 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6344 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6345 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6346 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6347 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6348 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6349 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6350 if (CHIP_REV_IS_FPGA(bp))
6351 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6353 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6354 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6355 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6356 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6357 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6358 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6359 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6360 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6361 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6362 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6365 static const struct {
6368 } bnx2x_parity_mask[] = {
6369 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6370 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6371 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6372 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6373 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6374 {QM_REG_QM_PRTY_MASK, 0x0},
6375 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6376 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6377 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6378 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6379 {CDU_REG_CDU_PRTY_MASK, 0x0},
6380 {CFC_REG_CFC_PRTY_MASK, 0x0},
6381 {DBG_REG_DBG_PRTY_MASK, 0x0},
6382 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6383 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6384 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6385 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6386 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6387 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6388 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6389 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6390 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6391 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6392 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6393 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6394 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6395 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6396 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6399 static void enable_blocks_parity(struct bnx2x *bp)
6401 int i, mask_arr_len =
6402 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6404 for (i = 0; i < mask_arr_len; i++)
6405 REG_WR(bp, bnx2x_parity_mask[i].addr,
6406 bnx2x_parity_mask[i].mask);
6410 static void bnx2x_reset_common(struct bnx2x *bp)
6413 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6415 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6418 static void bnx2x_init_pxp(struct bnx2x *bp)
6421 int r_order, w_order;
6423 pci_read_config_word(bp->pdev,
6424 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6425 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6426 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6428 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6430 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6434 bnx2x_init_pxp_arb(bp, r_order, w_order);
6437 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6447 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6448 SHARED_HW_CFG_FAN_FAILURE_MASK;
6450 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6454 * The fan failure mechanism is usually related to the PHY type since
6455 * the power consumption of the board is affected by the PHY. Currently,
6456 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6458 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6459 for (port = PORT_0; port < PORT_MAX; port++) {
6461 SHMEM_RD(bp, dev_info.port_hw_config[port].
6462 external_phy_config) &
6463 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6466 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6468 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6470 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6473 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6475 if (is_required == 0)
6478 /* Fan failure is indicated by SPIO 5 */
6479 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6480 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6482 /* set to active low mode */
6483 val = REG_RD(bp, MISC_REG_SPIO_INT);
6484 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6485 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6486 REG_WR(bp, MISC_REG_SPIO_INT, val);
6488 /* enable interrupt to signal the IGU */
6489 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6490 val |= (1 << MISC_REGISTERS_SPIO_5);
6491 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6494 static int bnx2x_init_common(struct bnx2x *bp)
6501 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6503 bnx2x_reset_common(bp);
6504 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6505 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6507 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6508 if (CHIP_IS_E1H(bp))
6509 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6511 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6513 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6515 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6516 if (CHIP_IS_E1(bp)) {
6517 /* enable HW interrupt from PXP on USDM overflow
6518 bit 16 on INT_MASK_0 */
6519 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6522 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6526 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6527 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6528 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6529 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6530 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6531 /* make sure this value is 0 */
6532 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6534 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6535 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6536 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6537 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6538 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6541 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6543 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6544 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6545 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6548 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6549 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6551 /* let the HW do it's magic ... */
6553 /* finish PXP init */
6554 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6556 BNX2X_ERR("PXP2 CFG failed\n");
6559 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6561 BNX2X_ERR("PXP2 RD_INIT failed\n");
6565 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6566 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6568 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6570 /* clean the DMAE memory */
6572 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6574 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6575 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6576 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6577 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6579 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6580 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6581 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6582 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6584 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6589 for (i = 0; i < 64; i++) {
6590 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6591 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6593 if (CHIP_IS_E1H(bp)) {
6594 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6595 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6600 /* soft reset pulse */
6601 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6602 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6605 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6608 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6609 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6610 if (!CHIP_REV_IS_SLOW(bp)) {
6611 /* enable hw interrupt from doorbell Q */
6612 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6615 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6617 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6620 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6622 if (CHIP_IS_E1H(bp))
6623 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6625 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6627 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6628 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6630 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6631 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6632 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6633 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6635 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6636 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6637 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6638 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6641 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6643 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6646 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6647 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6648 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6650 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6651 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6652 REG_WR(bp, i, random32());
6653 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6655 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6656 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6657 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6658 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6659 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6660 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6661 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6662 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6663 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6664 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6666 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6668 if (sizeof(union cdu_context) != 1024)
6669 /* we currently assume that a context is 1024 bytes */
6670 dev_alert(&bp->pdev->dev, "please adjust the size "
6671 "of cdu_context(%ld)\n",
6672 (long)sizeof(union cdu_context));
6674 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6675 val = (4 << 24) + (0 << 12) + 1024;
6676 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6678 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6679 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6680 /* enable context validation interrupt from CFC */
6681 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6683 /* set the thresholds to prevent CFC/CDU race */
6684 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6686 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6687 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6689 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6690 /* Reset PCIE errors for debug */
6691 REG_WR(bp, 0x2814, 0xffffffff);
6692 REG_WR(bp, 0x3820, 0xffffffff);
6694 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6695 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6696 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6697 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6699 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6700 if (CHIP_IS_E1H(bp)) {
6701 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6702 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6705 if (CHIP_REV_IS_SLOW(bp))
6708 /* finish CFC init */
6709 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6711 BNX2X_ERR("CFC LL_INIT failed\n");
6714 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6716 BNX2X_ERR("CFC AC_INIT failed\n");
6719 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6721 BNX2X_ERR("CFC CAM_INIT failed\n");
6724 REG_WR(bp, CFC_REG_DEBUG0, 0);
6726 /* read NIG statistic
6727 to see if this is our first up since powerup */
6728 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6729 val = *bnx2x_sp(bp, wb_data[0]);
6731 /* do internal memory self test */
6732 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6733 BNX2X_ERR("internal mem self test failed\n");
6737 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6740 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6742 bp->port.need_hw_lock = 1;
6749 bnx2x_setup_fan_failure_detection(bp);
6751 /* clear PXP2 attentions */
6752 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6754 enable_blocks_attention(bp);
6755 if (CHIP_PARITY_SUPPORTED(bp))
6756 enable_blocks_parity(bp);
6758 if (!BP_NOMCP(bp)) {
6759 bnx2x_acquire_phy_lock(bp);
6760 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6761 bnx2x_release_phy_lock(bp);
6763 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6768 static int bnx2x_init_port(struct bnx2x *bp)
6770 int port = BP_PORT(bp);
6771 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6775 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6777 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6779 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6780 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6782 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6783 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6784 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6785 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6788 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6790 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6791 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6795 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6797 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6798 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6799 /* no pause for emulation and FPGA */
6804 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6805 else if (bp->dev->mtu > 4096) {
6806 if (bp->flags & ONE_PORT_FLAG)
6810 /* (24*1024 + val*4)/256 */
6811 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6814 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6815 high = low + 56; /* 14*1024/256 */
6817 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6818 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6821 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6823 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6824 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6825 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6826 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6828 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6829 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6830 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6831 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6833 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6834 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6836 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6838 /* configure PBF to work without PAUSE mtu 9000 */
6839 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6841 /* update threshold */
6842 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6843 /* update init credit */
6844 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6847 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6849 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6852 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6854 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6855 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6857 if (CHIP_IS_E1(bp)) {
6858 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6859 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6861 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6863 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6864 /* init aeu_mask_attn_func_0/1:
6865 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6866 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6867 * bits 4-7 are used for "per vn group attention" */
6868 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6869 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6871 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6872 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6873 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6874 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6875 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6877 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6879 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6881 if (CHIP_IS_E1H(bp)) {
6882 /* 0x2 disable e1hov, 0x1 enable */
6883 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6884 (IS_E1HMF(bp) ? 0x1 : 0x2));
6887 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6888 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6889 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6893 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6894 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6896 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6897 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6899 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6901 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6902 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6904 /* The GPIO should be swapped if the swap register is
6906 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6909 /* Select function upon port-swap configuration */
6911 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6912 aeu_gpio_mask = (swap_val && swap_override) ?
6913 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6916 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6917 aeu_gpio_mask = (swap_val && swap_override) ?
6918 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6919 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6921 val = REG_RD(bp, offset);
6922 /* add GPIO3 to group */
6923 val |= aeu_gpio_mask;
6924 REG_WR(bp, offset, val);
6928 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6929 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6930 /* add SPIO 5 to group 0 */
6932 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6933 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6934 val = REG_RD(bp, reg_addr);
6935 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6936 REG_WR(bp, reg_addr, val);
6944 bnx2x__link_reset(bp);
6949 #define ILT_PER_FUNC (768/2)
6950 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6951 /* the phys address is shifted right 12 bits and has an added
6952 1=valid bit added to the 53rd bit
6953 then since this is a wide register(TM)
6954 we split it into two 32 bit writes
6956 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6957 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6958 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6959 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6962 #define CNIC_ILT_LINES 127
6963 #define CNIC_CTX_PER_ILT 16
6965 #define CNIC_ILT_LINES 0
6968 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6972 if (CHIP_IS_E1H(bp))
6973 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6975 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6977 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6980 static int bnx2x_init_func(struct bnx2x *bp)
6982 int port = BP_PORT(bp);
6983 int func = BP_FUNC(bp);
6987 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6989 /* set MSI reconfigure capability */
6990 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6991 val = REG_RD(bp, addr);
6992 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6993 REG_WR(bp, addr, val);
6995 i = FUNC_ILT_BASE(func);
6997 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6998 if (CHIP_IS_E1H(bp)) {
6999 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
7000 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
7002 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
7003 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
7006 i += 1 + CNIC_ILT_LINES;
7007 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
7009 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
7011 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7012 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7016 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7018 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7020 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7021 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7025 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7027 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7029 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7030 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7033 /* tell the searcher where the T2 table is */
7034 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7036 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7037 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7039 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7040 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7041 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7043 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7046 if (CHIP_IS_E1H(bp)) {
7047 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7048 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7049 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7050 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7051 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7052 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7053 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7054 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7055 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7057 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7058 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7061 /* HC init per function */
7062 if (CHIP_IS_E1H(bp)) {
7063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7065 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7066 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7068 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7070 /* Reset PCIE errors for debug */
7071 REG_WR(bp, 0x2114, 0xffffffff);
7072 REG_WR(bp, 0x2120, 0xffffffff);
7077 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7081 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7082 BP_FUNC(bp), load_code);
7085 mutex_init(&bp->dmae_mutex);
7086 rc = bnx2x_gunzip_init(bp);
7090 switch (load_code) {
7091 case FW_MSG_CODE_DRV_LOAD_COMMON:
7092 rc = bnx2x_init_common(bp);
7097 case FW_MSG_CODE_DRV_LOAD_PORT:
7099 rc = bnx2x_init_port(bp);
7104 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7106 rc = bnx2x_init_func(bp);
7112 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7116 if (!BP_NOMCP(bp)) {
7117 int func = BP_FUNC(bp);
7119 bp->fw_drv_pulse_wr_seq =
7120 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7121 DRV_PULSE_SEQ_MASK);
7122 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7125 /* this needs to be done before gunzip end */
7126 bnx2x_zero_def_sb(bp);
7127 for_each_queue(bp, i)
7128 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7130 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7134 bnx2x_gunzip_end(bp);
7139 static void bnx2x_free_mem(struct bnx2x *bp)
7142 #define BNX2X_PCI_FREE(x, y, size) \
7145 dma_free_coherent(&bp->pdev->dev, size, x, y); \
7151 #define BNX2X_FREE(x) \
7163 for_each_queue(bp, i) {
7166 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7167 bnx2x_fp(bp, i, status_blk_mapping),
7168 sizeof(struct host_status_block));
7171 for_each_queue(bp, i) {
7173 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7174 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7175 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7176 bnx2x_fp(bp, i, rx_desc_mapping),
7177 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7179 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7180 bnx2x_fp(bp, i, rx_comp_mapping),
7181 sizeof(struct eth_fast_path_rx_cqe) *
7185 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7186 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7187 bnx2x_fp(bp, i, rx_sge_mapping),
7188 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7191 for_each_queue(bp, i) {
7193 /* fastpath tx rings: tx_buf tx_desc */
7194 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7196 bnx2x_fp(bp, i, tx_desc_mapping),
7197 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7199 /* end of fastpath */
7201 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7202 sizeof(struct host_def_status_block));
7204 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7205 sizeof(struct bnx2x_slowpath));
7208 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7209 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7210 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7211 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7212 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7213 sizeof(struct host_status_block));
7215 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7217 #undef BNX2X_PCI_FREE
7221 static int bnx2x_alloc_mem(struct bnx2x *bp)
7224 #define BNX2X_PCI_ALLOC(x, y, size) \
7226 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7228 goto alloc_mem_err; \
7229 memset(x, 0, size); \
7232 #define BNX2X_ALLOC(x, size) \
7234 x = vmalloc(size); \
7236 goto alloc_mem_err; \
7237 memset(x, 0, size); \
7244 for_each_queue(bp, i) {
7245 bnx2x_fp(bp, i, bp) = bp;
7248 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7249 &bnx2x_fp(bp, i, status_blk_mapping),
7250 sizeof(struct host_status_block));
7253 for_each_queue(bp, i) {
7255 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7256 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7257 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7258 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7259 &bnx2x_fp(bp, i, rx_desc_mapping),
7260 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7262 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7263 &bnx2x_fp(bp, i, rx_comp_mapping),
7264 sizeof(struct eth_fast_path_rx_cqe) *
7268 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7269 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7270 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7271 &bnx2x_fp(bp, i, rx_sge_mapping),
7272 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7275 for_each_queue(bp, i) {
7277 /* fastpath tx rings: tx_buf tx_desc */
7278 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7279 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7281 &bnx2x_fp(bp, i, tx_desc_mapping),
7282 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7284 /* end of fastpath */
7286 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7287 sizeof(struct host_def_status_block));
7289 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7290 sizeof(struct bnx2x_slowpath));
7293 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7295 /* allocate searcher T2 table
7296 we allocate 1/4 of alloc num for T2
7297 (which is not entered into the ILT) */
7298 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7300 /* Initialize T2 (for 1024 connections) */
7301 for (i = 0; i < 16*1024; i += 64)
7302 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7304 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7305 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7307 /* QM queues (128*MAX_CONN) */
7308 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7310 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7311 sizeof(struct host_status_block));
7314 /* Slow path ring */
7315 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7323 #undef BNX2X_PCI_ALLOC
7327 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7331 for_each_queue(bp, i) {
7332 struct bnx2x_fastpath *fp = &bp->fp[i];
7334 u16 bd_cons = fp->tx_bd_cons;
7335 u16 sw_prod = fp->tx_pkt_prod;
7336 u16 sw_cons = fp->tx_pkt_cons;
7338 while (sw_cons != sw_prod) {
7339 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7345 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7349 for_each_queue(bp, j) {
7350 struct bnx2x_fastpath *fp = &bp->fp[j];
7352 for (i = 0; i < NUM_RX_BD; i++) {
7353 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7354 struct sk_buff *skb = rx_buf->skb;
7359 dma_unmap_single(&bp->pdev->dev,
7360 dma_unmap_addr(rx_buf, mapping),
7361 bp->rx_buf_size, DMA_FROM_DEVICE);
7366 if (!fp->disable_tpa)
7367 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7368 ETH_MAX_AGGREGATION_QUEUES_E1 :
7369 ETH_MAX_AGGREGATION_QUEUES_E1H);
7373 static void bnx2x_free_skbs(struct bnx2x *bp)
7375 bnx2x_free_tx_skbs(bp);
7376 bnx2x_free_rx_skbs(bp);
7379 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7383 free_irq(bp->msix_table[0].vector, bp->dev);
7384 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7385 bp->msix_table[0].vector);
7390 for_each_queue(bp, i) {
7391 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7392 "state %x\n", i, bp->msix_table[i + offset].vector,
7393 bnx2x_fp(bp, i, state));
7395 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7399 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7401 if (bp->flags & USING_MSIX_FLAG) {
7403 bnx2x_free_msix_irqs(bp);
7404 pci_disable_msix(bp->pdev);
7405 bp->flags &= ~USING_MSIX_FLAG;
7407 } else if (bp->flags & USING_MSI_FLAG) {
7409 free_irq(bp->pdev->irq, bp->dev);
7410 pci_disable_msi(bp->pdev);
7411 bp->flags &= ~USING_MSI_FLAG;
7413 } else if (!disable_only)
7414 free_irq(bp->pdev->irq, bp->dev);
7417 static int bnx2x_enable_msix(struct bnx2x *bp)
7419 int i, rc, offset = 1;
7422 bp->msix_table[0].entry = igu_vec;
7423 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7426 igu_vec = BP_L_ID(bp) + offset;
7427 bp->msix_table[1].entry = igu_vec;
7428 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7431 for_each_queue(bp, i) {
7432 igu_vec = BP_L_ID(bp) + offset + i;
7433 bp->msix_table[i + offset].entry = igu_vec;
7434 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7435 "(fastpath #%u)\n", i + offset, igu_vec, i);
7438 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7439 BNX2X_NUM_QUEUES(bp) + offset);
7442 * reconfigure number of tx/rx queues according to available
7445 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7446 /* vectors available for FP */
7447 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7450 "Trying to use less MSI-X vectors: %d\n", rc);
7452 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7456 "MSI-X is not attainable rc %d\n", rc);
7460 bp->num_queues = min(bp->num_queues, fp_vec);
7462 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7465 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7469 bp->flags |= USING_MSIX_FLAG;
7474 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7476 int i, rc, offset = 1;
7478 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7479 bp->dev->name, bp->dev);
7481 BNX2X_ERR("request sp irq failed\n");
7488 for_each_queue(bp, i) {
7489 struct bnx2x_fastpath *fp = &bp->fp[i];
7490 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7493 rc = request_irq(bp->msix_table[i + offset].vector,
7494 bnx2x_msix_fp_int, 0, fp->name, fp);
7496 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7497 bnx2x_free_msix_irqs(bp);
7501 fp->state = BNX2X_FP_STATE_IRQ;
7504 i = BNX2X_NUM_QUEUES(bp);
7505 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7507 bp->msix_table[0].vector,
7508 0, bp->msix_table[offset].vector,
7509 i - 1, bp->msix_table[offset + i - 1].vector);
7514 static int bnx2x_enable_msi(struct bnx2x *bp)
7518 rc = pci_enable_msi(bp->pdev);
7520 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7523 bp->flags |= USING_MSI_FLAG;
7528 static int bnx2x_req_irq(struct bnx2x *bp)
7530 unsigned long flags;
7533 if (bp->flags & USING_MSI_FLAG)
7536 flags = IRQF_SHARED;
7538 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7539 bp->dev->name, bp->dev);
7541 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7546 static void bnx2x_napi_enable(struct bnx2x *bp)
7550 for_each_queue(bp, i)
7551 napi_enable(&bnx2x_fp(bp, i, napi));
7554 static void bnx2x_napi_disable(struct bnx2x *bp)
7558 for_each_queue(bp, i)
7559 napi_disable(&bnx2x_fp(bp, i, napi));
7562 static void bnx2x_netif_start(struct bnx2x *bp)
7566 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7567 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7570 if (netif_running(bp->dev)) {
7571 bnx2x_napi_enable(bp);
7572 bnx2x_int_enable(bp);
7573 if (bp->state == BNX2X_STATE_OPEN)
7574 netif_tx_wake_all_queues(bp->dev);
7579 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7581 bnx2x_int_disable_sync(bp, disable_hw);
7582 bnx2x_napi_disable(bp);
7583 netif_tx_disable(bp->dev);
7587 * Init service functions
7591 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7593 * @param bp driver descriptor
7594 * @param set set or clear an entry (1 or 0)
7595 * @param mac pointer to a buffer containing a MAC
7596 * @param cl_bit_vec bit vector of clients to register a MAC for
7597 * @param cam_offset offset in a CAM to use
7598 * @param with_bcast set broadcast MAC as well
7600 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7601 u32 cl_bit_vec, u8 cam_offset,
7604 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7605 int port = BP_PORT(bp);
7608 * unicasts 0-31:port0 32-63:port1
7609 * multicast 64-127:port0 128-191:port1
7611 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7612 config->hdr.offset = cam_offset;
7613 config->hdr.client_id = 0xff;
7614 config->hdr.reserved1 = 0;
7617 config->config_table[0].cam_entry.msb_mac_addr =
7618 swab16(*(u16 *)&mac[0]);
7619 config->config_table[0].cam_entry.middle_mac_addr =
7620 swab16(*(u16 *)&mac[2]);
7621 config->config_table[0].cam_entry.lsb_mac_addr =
7622 swab16(*(u16 *)&mac[4]);
7623 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7625 config->config_table[0].target_table_entry.flags = 0;
7627 CAM_INVALIDATE(config->config_table[0]);
7628 config->config_table[0].target_table_entry.clients_bit_vector =
7629 cpu_to_le32(cl_bit_vec);
7630 config->config_table[0].target_table_entry.vlan_id = 0;
7632 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7633 (set ? "setting" : "clearing"),
7634 config->config_table[0].cam_entry.msb_mac_addr,
7635 config->config_table[0].cam_entry.middle_mac_addr,
7636 config->config_table[0].cam_entry.lsb_mac_addr);
7640 config->config_table[1].cam_entry.msb_mac_addr =
7641 cpu_to_le16(0xffff);
7642 config->config_table[1].cam_entry.middle_mac_addr =
7643 cpu_to_le16(0xffff);
7644 config->config_table[1].cam_entry.lsb_mac_addr =
7645 cpu_to_le16(0xffff);
7646 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7648 config->config_table[1].target_table_entry.flags =
7649 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7651 CAM_INVALIDATE(config->config_table[1]);
7652 config->config_table[1].target_table_entry.clients_bit_vector =
7653 cpu_to_le32(cl_bit_vec);
7654 config->config_table[1].target_table_entry.vlan_id = 0;
7657 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7658 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7659 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7663 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7665 * @param bp driver descriptor
7666 * @param set set or clear an entry (1 or 0)
7667 * @param mac pointer to a buffer containing a MAC
7668 * @param cl_bit_vec bit vector of clients to register a MAC for
7669 * @param cam_offset offset in a CAM to use
7671 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7672 u32 cl_bit_vec, u8 cam_offset)
7674 struct mac_configuration_cmd_e1h *config =
7675 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7677 config->hdr.length = 1;
7678 config->hdr.offset = cam_offset;
7679 config->hdr.client_id = 0xff;
7680 config->hdr.reserved1 = 0;
7683 config->config_table[0].msb_mac_addr =
7684 swab16(*(u16 *)&mac[0]);
7685 config->config_table[0].middle_mac_addr =
7686 swab16(*(u16 *)&mac[2]);
7687 config->config_table[0].lsb_mac_addr =
7688 swab16(*(u16 *)&mac[4]);
7689 config->config_table[0].clients_bit_vector =
7690 cpu_to_le32(cl_bit_vec);
7691 config->config_table[0].vlan_id = 0;
7692 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7694 config->config_table[0].flags = BP_PORT(bp);
7696 config->config_table[0].flags =
7697 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7699 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7700 (set ? "setting" : "clearing"),
7701 config->config_table[0].msb_mac_addr,
7702 config->config_table[0].middle_mac_addr,
7703 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7705 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7706 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7707 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7710 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7711 int *state_p, int poll)
7713 /* can take a while if any port is running */
7716 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7717 poll ? "polling" : "waiting", state, idx);
7722 bnx2x_rx_int(bp->fp, 10);
7723 /* if index is different from 0
7724 * the reply for some commands will
7725 * be on the non default queue
7728 bnx2x_rx_int(&bp->fp[idx], 10);
7731 mb(); /* state is changed by bnx2x_sp_event() */
7732 if (*state_p == state) {
7733 #ifdef BNX2X_STOP_ON_ERROR
7734 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7746 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7747 poll ? "polling" : "waiting", state, idx);
7748 #ifdef BNX2X_STOP_ON_ERROR
7755 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7757 bp->set_mac_pending++;
7760 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7761 (1 << bp->fp->cl_id), BP_FUNC(bp));
7763 /* Wait for a completion */
7764 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7767 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7769 bp->set_mac_pending++;
7772 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7773 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7776 /* Wait for a completion */
7777 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7782 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7783 * MAC(s). This function will wait until the ramdord completion
7786 * @param bp driver handle
7787 * @param set set or clear the CAM entry
7789 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7791 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7793 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7795 bp->set_mac_pending++;
7798 /* Send a SET_MAC ramrod */
7800 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7801 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7804 /* CAM allocation for E1H
7805 * unicasts: by func number
7806 * multicast: 20+FUNC*20, 20 each
7808 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7809 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7811 /* Wait for a completion when setting */
7812 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7818 static int bnx2x_setup_leading(struct bnx2x *bp)
7822 /* reset IGU state */
7823 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7826 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7828 /* Wait for completion */
7829 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7834 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7836 struct bnx2x_fastpath *fp = &bp->fp[index];
7838 /* reset IGU state */
7839 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7842 fp->state = BNX2X_FP_STATE_OPENING;
7843 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7846 /* Wait for completion */
7847 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7851 static int bnx2x_poll(struct napi_struct *napi, int budget);
7853 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7856 switch (bp->multi_mode) {
7857 case ETH_RSS_MODE_DISABLED:
7861 case ETH_RSS_MODE_REGULAR:
7863 bp->num_queues = min_t(u32, num_queues,
7864 BNX2X_MAX_QUEUES(bp));
7866 bp->num_queues = min_t(u32, num_online_cpus(),
7867 BNX2X_MAX_QUEUES(bp));
7877 static int bnx2x_set_num_queues(struct bnx2x *bp)
7881 switch (bp->int_mode) {
7885 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7888 /* Set number of queues according to bp->multi_mode value */
7889 bnx2x_set_num_queues_msix(bp);
7891 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7894 /* if we can't use MSI-X we only need one fp,
7895 * so try to enable MSI-X with the requested number of fp's
7896 * and fallback to MSI or legacy INTx with one fp
7898 rc = bnx2x_enable_msix(bp);
7900 /* failed to enable MSI-X */
7904 bp->dev->real_num_tx_queues = bp->num_queues;
7909 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7910 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7913 /* must be called with rtnl_lock */
7914 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7919 #ifdef BNX2X_STOP_ON_ERROR
7920 if (unlikely(bp->panic))
7924 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7926 rc = bnx2x_set_num_queues(bp);
7928 if (bnx2x_alloc_mem(bp)) {
7929 bnx2x_free_irq(bp, true);
7933 for_each_queue(bp, i)
7934 bnx2x_fp(bp, i, disable_tpa) =
7935 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7937 for_each_queue(bp, i)
7938 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7941 bnx2x_napi_enable(bp);
7943 if (bp->flags & USING_MSIX_FLAG) {
7944 rc = bnx2x_req_msix_irqs(bp);
7946 bnx2x_free_irq(bp, true);
7950 /* Fall to INTx if failed to enable MSI-X due to lack of
7951 memory (in bnx2x_set_num_queues()) */
7952 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7953 bnx2x_enable_msi(bp);
7955 rc = bnx2x_req_irq(bp);
7957 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7958 bnx2x_free_irq(bp, true);
7961 if (bp->flags & USING_MSI_FLAG) {
7962 bp->dev->irq = bp->pdev->irq;
7963 netdev_info(bp->dev, "using MSI IRQ %d\n",
7968 /* Send LOAD_REQUEST command to MCP
7969 Returns the type of LOAD command:
7970 if it is the first port to be initialized
7971 common blocks should be initialized, otherwise - not
7973 if (!BP_NOMCP(bp)) {
7974 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7976 BNX2X_ERR("MCP response failure, aborting\n");
7980 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7981 rc = -EBUSY; /* other port in diagnostic mode */
7986 int port = BP_PORT(bp);
7988 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7989 load_count[0], load_count[1], load_count[2]);
7991 load_count[1 + port]++;
7992 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7993 load_count[0], load_count[1], load_count[2]);
7994 if (load_count[0] == 1)
7995 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7996 else if (load_count[1 + port] == 1)
7997 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7999 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
8002 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8003 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8007 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8010 rc = bnx2x_init_hw(bp, load_code);
8012 BNX2X_ERR("HW init failed, aborting\n");
8013 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8014 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8015 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8019 /* Setup NIC internals and enable interrupts */
8020 bnx2x_nic_init(bp, load_code);
8022 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8023 (bp->common.shmem2_base))
8024 SHMEM2_WR(bp, dcc_support,
8025 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8026 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8028 /* Send LOAD_DONE command to MCP */
8029 if (!BP_NOMCP(bp)) {
8030 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8032 BNX2X_ERR("MCP response failure, aborting\n");
8038 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8040 rc = bnx2x_setup_leading(bp);
8042 BNX2X_ERR("Setup leading failed!\n");
8043 #ifndef BNX2X_STOP_ON_ERROR
8051 if (CHIP_IS_E1H(bp))
8052 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8053 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8054 bp->flags |= MF_FUNC_DIS;
8057 if (bp->state == BNX2X_STATE_OPEN) {
8059 /* Enable Timer scan */
8060 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8062 for_each_nondefault_queue(bp, i) {
8063 rc = bnx2x_setup_multi(bp, i);
8073 bnx2x_set_eth_mac_addr_e1(bp, 1);
8075 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8077 /* Set iSCSI L2 MAC */
8078 mutex_lock(&bp->cnic_mutex);
8079 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8080 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8081 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8082 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8085 mutex_unlock(&bp->cnic_mutex);
8090 bnx2x_initial_phy_init(bp, load_mode);
8092 /* Start fast path */
8093 switch (load_mode) {
8095 if (bp->state == BNX2X_STATE_OPEN) {
8096 /* Tx queue should be only reenabled */
8097 netif_tx_wake_all_queues(bp->dev);
8099 /* Initialize the receive filter. */
8100 bnx2x_set_rx_mode(bp->dev);
8104 netif_tx_start_all_queues(bp->dev);
8105 if (bp->state != BNX2X_STATE_OPEN)
8106 netif_tx_disable(bp->dev);
8107 /* Initialize the receive filter. */
8108 bnx2x_set_rx_mode(bp->dev);
8112 /* Initialize the receive filter. */
8113 bnx2x_set_rx_mode(bp->dev);
8114 bp->state = BNX2X_STATE_DIAG;
8122 bnx2x__link_status_update(bp);
8124 /* start the timer */
8125 mod_timer(&bp->timer, jiffies + bp->current_interval);
8128 bnx2x_setup_cnic_irq_info(bp);
8129 if (bp->state == BNX2X_STATE_OPEN)
8130 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8132 bnx2x_inc_load_cnt(bp);
8138 /* Disable Timer scan */
8139 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8142 bnx2x_int_disable_sync(bp, 1);
8143 if (!BP_NOMCP(bp)) {
8144 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8145 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8148 /* Free SKBs, SGEs, TPA pool and driver internals */
8149 bnx2x_free_skbs(bp);
8150 for_each_queue(bp, i)
8151 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8154 bnx2x_free_irq(bp, false);
8156 bnx2x_napi_disable(bp);
8157 for_each_queue(bp, i)
8158 netif_napi_del(&bnx2x_fp(bp, i, napi));
8164 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8166 struct bnx2x_fastpath *fp = &bp->fp[index];
8169 /* halt the connection */
8170 fp->state = BNX2X_FP_STATE_HALTING;
8171 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8173 /* Wait for completion */
8174 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8176 if (rc) /* timeout */
8179 /* delete cfc entry */
8180 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8182 /* Wait for completion */
8183 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8188 static int bnx2x_stop_leading(struct bnx2x *bp)
8190 __le16 dsb_sp_prod_idx;
8191 /* if the other port is handling traffic,
8192 this can take a lot of time */
8198 /* Send HALT ramrod */
8199 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8200 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8202 /* Wait for completion */
8203 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8204 &(bp->fp[0].state), 1);
8205 if (rc) /* timeout */
8208 dsb_sp_prod_idx = *bp->dsb_sp_prod;
8210 /* Send PORT_DELETE ramrod */
8211 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8213 /* Wait for completion to arrive on default status block
8214 we are going to reset the chip anyway
8215 so there is not much to do if this times out
8217 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8219 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8220 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8221 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8222 #ifdef BNX2X_STOP_ON_ERROR
8230 rmb(); /* Refresh the dsb_sp_prod */
8232 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8233 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8238 static void bnx2x_reset_func(struct bnx2x *bp)
8240 int port = BP_PORT(bp);
8241 int func = BP_FUNC(bp);
8245 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8246 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8249 /* Disable Timer scan */
8250 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8252 * Wait for at least 10ms and up to 2 second for the timers scan to
8255 for (i = 0; i < 200; i++) {
8257 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8262 base = FUNC_ILT_BASE(func);
8263 for (i = base; i < base + ILT_PER_FUNC; i++)
8264 bnx2x_ilt_wr(bp, i, 0);
8267 static void bnx2x_reset_port(struct bnx2x *bp)
8269 int port = BP_PORT(bp);
8272 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8274 /* Do not rcv packets to BRB */
8275 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8276 /* Do not direct rcv packets that are not for MCP to the BRB */
8277 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8278 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8281 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8284 /* Check for BRB port occupancy */
8285 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8287 DP(NETIF_MSG_IFDOWN,
8288 "BRB1 is not empty %d blocks are occupied\n", val);
8290 /* TODO: Close Doorbell port? */
8293 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8295 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8296 BP_FUNC(bp), reset_code);
8298 switch (reset_code) {
8299 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8300 bnx2x_reset_port(bp);
8301 bnx2x_reset_func(bp);
8302 bnx2x_reset_common(bp);
8305 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8306 bnx2x_reset_port(bp);
8307 bnx2x_reset_func(bp);
8310 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8311 bnx2x_reset_func(bp);
8315 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8320 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8322 int port = BP_PORT(bp);
8326 /* Wait until tx fastpath tasks complete */
8327 for_each_queue(bp, i) {
8328 struct bnx2x_fastpath *fp = &bp->fp[i];
8331 while (bnx2x_has_tx_work_unload(fp)) {
8335 BNX2X_ERR("timeout waiting for queue[%d]\n",
8337 #ifdef BNX2X_STOP_ON_ERROR
8348 /* Give HW time to discard old tx messages */
8351 if (CHIP_IS_E1(bp)) {
8352 struct mac_configuration_cmd *config =
8353 bnx2x_sp(bp, mcast_config);
8355 bnx2x_set_eth_mac_addr_e1(bp, 0);
8357 for (i = 0; i < config->hdr.length; i++)
8358 CAM_INVALIDATE(config->config_table[i]);
8360 config->hdr.length = i;
8361 if (CHIP_REV_IS_SLOW(bp))
8362 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8364 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8365 config->hdr.client_id = bp->fp->cl_id;
8366 config->hdr.reserved1 = 0;
8368 bp->set_mac_pending++;
8371 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8372 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8373 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8376 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8378 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8380 for (i = 0; i < MC_HASH_SIZE; i++)
8381 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8383 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8386 /* Clear iSCSI L2 MAC */
8387 mutex_lock(&bp->cnic_mutex);
8388 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8389 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8390 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8392 mutex_unlock(&bp->cnic_mutex);
8395 if (unload_mode == UNLOAD_NORMAL)
8396 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8398 else if (bp->flags & NO_WOL_FLAG)
8399 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8402 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8403 u8 *mac_addr = bp->dev->dev_addr;
8405 /* The mac address is written to entries 1-4 to
8406 preserve entry 0 which is used by the PMF */
8407 u8 entry = (BP_E1HVN(bp) + 1)*8;
8409 val = (mac_addr[0] << 8) | mac_addr[1];
8410 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8412 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8413 (mac_addr[4] << 8) | mac_addr[5];
8414 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8416 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8419 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8421 /* Close multi and leading connections
8422 Completions for ramrods are collected in a synchronous way */
8423 for_each_nondefault_queue(bp, i)
8424 if (bnx2x_stop_multi(bp, i))
8427 rc = bnx2x_stop_leading(bp);
8429 BNX2X_ERR("Stop leading failed!\n");
8430 #ifdef BNX2X_STOP_ON_ERROR
8439 reset_code = bnx2x_fw_command(bp, reset_code);
8441 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8442 load_count[0], load_count[1], load_count[2]);
8444 load_count[1 + port]--;
8445 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8446 load_count[0], load_count[1], load_count[2]);
8447 if (load_count[0] == 0)
8448 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8449 else if (load_count[1 + port] == 0)
8450 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8452 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8455 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8456 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8457 bnx2x__link_reset(bp);
8459 /* Reset the chip */
8460 bnx2x_reset_chip(bp, reset_code);
8462 /* Report UNLOAD_DONE to MCP */
8464 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8468 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8472 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8474 if (CHIP_IS_E1(bp)) {
8475 int port = BP_PORT(bp);
8476 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8477 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8479 val = REG_RD(bp, addr);
8481 REG_WR(bp, addr, val);
8482 } else if (CHIP_IS_E1H(bp)) {
8483 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8484 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8485 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8486 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8490 /* must be called with rtnl_lock */
8491 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8495 if (bp->state == BNX2X_STATE_CLOSED) {
8496 /* Interface has been removed - nothing to recover */
8497 bp->recovery_state = BNX2X_RECOVERY_DONE;
8499 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8506 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8508 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8510 /* Set "drop all" */
8511 bp->rx_mode = BNX2X_RX_MODE_NONE;
8512 bnx2x_set_storm_rx_mode(bp);
8514 /* Disable HW interrupts, NAPI and Tx */
8515 bnx2x_netif_stop(bp, 1);
8516 netif_carrier_off(bp->dev);
8518 del_timer_sync(&bp->timer);
8519 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8520 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8521 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8524 bnx2x_free_irq(bp, false);
8526 /* Cleanup the chip if needed */
8527 if (unload_mode != UNLOAD_RECOVERY)
8528 bnx2x_chip_cleanup(bp, unload_mode);
8532 /* Free SKBs, SGEs, TPA pool and driver internals */
8533 bnx2x_free_skbs(bp);
8534 for_each_queue(bp, i)
8535 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8536 for_each_queue(bp, i)
8537 netif_napi_del(&bnx2x_fp(bp, i, napi));
8540 bp->state = BNX2X_STATE_CLOSED;
8542 /* The last driver must disable a "close the gate" if there is no
8543 * parity attention or "process kill" pending.
8545 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8546 bnx2x_reset_is_done(bp))
8547 bnx2x_disable_close_the_gate(bp);
8549 /* Reset MCP mail box sequence if there is on going recovery */
8550 if (unload_mode == UNLOAD_RECOVERY)
8556 /* Close gates #2, #3 and #4: */
8557 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8561 /* Gates #2 and #4a are closed/opened for "not E1" only */
8562 if (!CHIP_IS_E1(bp)) {
8564 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8565 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8566 close ? (val | 0x1) : (val & (~(u32)1)));
8568 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8569 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8570 close ? (val | 0x1) : (val & (~(u32)1)));
8574 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8575 val = REG_RD(bp, addr);
8576 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8578 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8579 close ? "closing" : "opening");
8583 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8585 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8587 /* Do some magic... */
8588 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589 *magic_val = val & SHARED_MF_CLP_MAGIC;
8590 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8593 /* Restore the value of the `magic' bit.
8595 * @param pdev Device handle.
8596 * @param magic_val Old value of the `magic' bit.
8598 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8600 /* Restore the `magic' bit value... */
8601 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8602 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8603 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8604 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8605 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8606 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8609 /* Prepares for MCP reset: takes care of CLP configurations.
8612 * @param magic_val Old value of 'magic' bit.
8614 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8617 u32 validity_offset;
8619 DP(NETIF_MSG_HW, "Starting\n");
8621 /* Set `magic' bit in order to save MF config */
8622 if (!CHIP_IS_E1(bp))
8623 bnx2x_clp_reset_prep(bp, magic_val);
8625 /* Get shmem offset */
8626 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8627 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8629 /* Clear validity map flags */
8631 REG_WR(bp, shmem + validity_offset, 0);
8634 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8635 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
8637 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8638 * depending on the HW type.
8642 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8644 /* special handling for emulation and FPGA,
8645 wait 10 times longer */
8646 if (CHIP_REV_IS_SLOW(bp))
8647 msleep(MCP_ONE_TIMEOUT*10);
8649 msleep(MCP_ONE_TIMEOUT);
8652 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8654 u32 shmem, cnt, validity_offset, val;
8659 /* Get shmem offset */
8660 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8662 BNX2X_ERR("Shmem 0 return failure\n");
8667 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8669 /* Wait for MCP to come up */
8670 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8671 /* TBD: its best to check validity map of last port.
8672 * currently checks on port 0.
8674 val = REG_RD(bp, shmem + validity_offset);
8675 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8676 shmem + validity_offset, val);
8678 /* check that shared memory is valid. */
8679 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8680 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8683 bnx2x_mcp_wait_one(bp);
8686 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8688 /* Check that shared memory is valid. This indicates that MCP is up. */
8689 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8690 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8691 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8697 /* Restore the `magic' bit value */
8698 if (!CHIP_IS_E1(bp))
8699 bnx2x_clp_reset_done(bp, magic_val);
8704 static void bnx2x_pxp_prep(struct bnx2x *bp)
8706 if (!CHIP_IS_E1(bp)) {
8707 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8708 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8709 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8715 * Reset the whole chip except for:
8717 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8720 * - MISC (including AEU)
8724 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8726 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8729 MISC_REGISTERS_RESET_REG_1_RST_HC |
8730 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8731 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8734 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8735 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8736 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8737 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8738 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8739 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8740 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8741 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8743 reset_mask1 = 0xffffffff;
8746 reset_mask2 = 0xffff;
8748 reset_mask2 = 0x1ffff;
8750 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8751 reset_mask1 & (~not_reset_mask1));
8752 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8753 reset_mask2 & (~not_reset_mask2));
8758 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8759 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8763 static int bnx2x_process_kill(struct bnx2x *bp)
8767 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8770 /* Empty the Tetris buffer, wait for 1s */
8772 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8773 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8774 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8775 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8776 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8777 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8778 ((port_is_idle_0 & 0x1) == 0x1) &&
8779 ((port_is_idle_1 & 0x1) == 0x1) &&
8780 (pgl_exp_rom2 == 0xffffffff))
8783 } while (cnt-- > 0);
8786 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8788 " outstanding read requests after 1s!\n");
8789 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8790 " port_is_idle_0=0x%08x,"
8791 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8792 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8799 /* Close gates #2, #3 and #4 */
8800 bnx2x_set_234_gates(bp, true);
8802 /* TBD: Indicate that "process kill" is in progress to MCP */
8804 /* Clear "unprepared" bit */
8805 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8808 /* Make sure all is written to the chip before the reset */
8811 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8812 * PSWHST, GRC and PSWRD Tetris buffer.
8816 /* Prepare to chip reset: */
8818 bnx2x_reset_mcp_prep(bp, &val);
8824 /* reset the chip */
8825 bnx2x_process_kill_chip_reset(bp);
8828 /* Recover after reset: */
8830 if (bnx2x_reset_mcp_comp(bp, val))
8836 /* Open the gates #2, #3 and #4 */
8837 bnx2x_set_234_gates(bp, false);
8839 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8840 * reset state, re-enable attentions. */
8845 static int bnx2x_leader_reset(struct bnx2x *bp)
8848 /* Try to recover after the failure */
8849 if (bnx2x_process_kill(bp)) {
8850 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8853 goto exit_leader_reset;
8856 /* Clear "reset is in progress" bit and update the driver state */
8857 bnx2x_set_reset_done(bp);
8858 bp->recovery_state = BNX2X_RECOVERY_DONE;
8862 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8867 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8869 /* Assumption: runs under rtnl lock. This together with the fact
8870 * that it's called only from bnx2x_reset_task() ensure that it
8871 * will never be called when netif_running(bp->dev) is false.
8873 static void bnx2x_parity_recover(struct bnx2x *bp)
8875 DP(NETIF_MSG_HW, "Handling parity\n");
8877 switch (bp->recovery_state) {
8878 case BNX2X_RECOVERY_INIT:
8879 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8880 /* Try to get a LEADER_LOCK HW lock */
8881 if (bnx2x_trylock_hw_lock(bp,
8882 HW_LOCK_RESOURCE_RESERVED_08))
8885 /* Stop the driver */
8886 /* If interface has been removed - break */
8887 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8890 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8891 /* Ensure "is_leader" and "recovery_state"
8892 * update values are seen on other CPUs
8897 case BNX2X_RECOVERY_WAIT:
8898 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8899 if (bp->is_leader) {
8900 u32 load_counter = bnx2x_get_load_cnt(bp);
8902 /* Wait until all other functions get
8905 schedule_delayed_work(&bp->reset_task,
8909 /* If all other functions got down -
8910 * try to bring the chip back to
8911 * normal. In any case it's an exit
8912 * point for a leader.
8914 if (bnx2x_leader_reset(bp) ||
8915 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8916 printk(KERN_ERR"%s: Recovery "
8917 "has failed. Power cycle is "
8918 "needed.\n", bp->dev->name);
8919 /* Disconnect this device */
8920 netif_device_detach(bp->dev);
8921 /* Block ifup for all function
8922 * of this ASIC until
8923 * "process kill" or power
8926 bnx2x_set_reset_in_progress(bp);
8927 /* Shut down the power */
8928 bnx2x_set_power_state(bp,
8935 } else { /* non-leader */
8936 if (!bnx2x_reset_is_done(bp)) {
8937 /* Try to get a LEADER_LOCK HW lock as
8938 * long as a former leader may have
8939 * been unloaded by the user or
8940 * released a leadership by another
8943 if (bnx2x_trylock_hw_lock(bp,
8944 HW_LOCK_RESOURCE_RESERVED_08)) {
8945 /* I'm a leader now! Restart a
8952 schedule_delayed_work(&bp->reset_task,
8956 } else { /* A leader has completed
8957 * the "process kill". It's an exit
8958 * point for a non-leader.
8960 bnx2x_nic_load(bp, LOAD_NORMAL);
8961 bp->recovery_state =
8962 BNX2X_RECOVERY_DONE;
8973 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8974 * scheduled on a general queue in order to prevent a dead lock.
8976 static void bnx2x_reset_task(struct work_struct *work)
8978 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8980 #ifdef BNX2X_STOP_ON_ERROR
8981 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8982 " so reset not done to allow debug dump,\n"
8983 KERN_ERR " you will need to reboot when done\n");
8989 if (!netif_running(bp->dev))
8990 goto reset_task_exit;
8992 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8993 bnx2x_parity_recover(bp);
8995 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8996 bnx2x_nic_load(bp, LOAD_NORMAL);
9003 /* end of nic load/unload */
9008 * Init service functions
9011 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9014 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9015 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9016 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9017 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9018 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9019 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9020 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9021 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9023 BNX2X_ERR("Unsupported function index: %d\n", func);
9028 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9030 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9032 /* Flush all outstanding writes */
9035 /* Pretend to be function 0 */
9037 /* Flush the GRC transaction (in the chip) */
9038 new_val = REG_RD(bp, reg);
9040 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9045 /* From now we are in the "like-E1" mode */
9046 bnx2x_int_disable(bp);
9048 /* Flush all outstanding writes */
9051 /* Restore the original funtion settings */
9052 REG_WR(bp, reg, orig_func);
9053 new_val = REG_RD(bp, reg);
9054 if (new_val != orig_func) {
9055 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9056 orig_func, new_val);
9061 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9063 if (CHIP_IS_E1H(bp))
9064 bnx2x_undi_int_disable_e1h(bp, func);
9066 bnx2x_int_disable(bp);
9069 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9073 /* Check if there is any driver already loaded */
9074 val = REG_RD(bp, MISC_REG_UNPREPARED);
9076 /* Check if it is the UNDI driver
9077 * UNDI driver initializes CID offset for normal bell to 0x7
9079 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9080 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9082 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9084 int func = BP_FUNC(bp);
9088 /* clear the UNDI indication */
9089 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9091 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9093 /* try unload UNDI on port 0 */
9096 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9097 DRV_MSG_SEQ_NUMBER_MASK);
9098 reset_code = bnx2x_fw_command(bp, reset_code);
9100 /* if UNDI is loaded on the other port */
9101 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9103 /* send "DONE" for previous unload */
9104 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9106 /* unload UNDI on port 1 */
9109 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9110 DRV_MSG_SEQ_NUMBER_MASK);
9111 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9113 bnx2x_fw_command(bp, reset_code);
9116 /* now it's safe to release the lock */
9117 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9119 bnx2x_undi_int_disable(bp, func);
9121 /* close input traffic and wait for it */
9122 /* Do not rcv packets to BRB */
9124 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9125 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9126 /* Do not direct rcv packets that are not for MCP to
9129 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9130 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9133 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9134 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9137 /* save NIG port swap info */
9138 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9139 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9142 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9145 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9147 /* take the NIG out of reset and restore swap values */
9149 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9150 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9151 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9152 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9154 /* send unload done to the MCP */
9155 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9157 /* restore our func and fw_seq */
9160 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9161 DRV_MSG_SEQ_NUMBER_MASK);
9164 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9168 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9170 u32 val, val2, val3, val4, id;
9173 /* Get the chip revision id and number. */
9174 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9175 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9176 id = ((val & 0xffff) << 16);
9177 val = REG_RD(bp, MISC_REG_CHIP_REV);
9178 id |= ((val & 0xf) << 12);
9179 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9180 id |= ((val & 0xff) << 4);
9181 val = REG_RD(bp, MISC_REG_BOND_ID);
9183 bp->common.chip_id = id;
9184 bp->link_params.chip_id = bp->common.chip_id;
9185 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9187 val = (REG_RD(bp, 0x2874) & 0x55);
9188 if ((bp->common.chip_id & 0x1) ||
9189 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9190 bp->flags |= ONE_PORT_FLAG;
9191 BNX2X_DEV_INFO("single port device\n");
9194 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9195 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9196 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9197 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9198 bp->common.flash_size, bp->common.flash_size);
9200 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9201 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9202 bp->link_params.shmem_base = bp->common.shmem_base;
9203 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9204 bp->common.shmem_base, bp->common.shmem2_base);
9206 if (!bp->common.shmem_base ||
9207 (bp->common.shmem_base < 0xA0000) ||
9208 (bp->common.shmem_base >= 0xC0000)) {
9209 BNX2X_DEV_INFO("MCP not active\n");
9210 bp->flags |= NO_MCP_FLAG;
9214 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9215 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9216 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9217 BNX2X_ERROR("BAD MCP validity signature\n");
9219 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9220 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9222 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9223 SHARED_HW_CFG_LED_MODE_MASK) >>
9224 SHARED_HW_CFG_LED_MODE_SHIFT);
9226 bp->link_params.feature_config_flags = 0;
9227 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9228 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9229 bp->link_params.feature_config_flags |=
9230 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9232 bp->link_params.feature_config_flags &=
9233 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9235 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9236 bp->common.bc_ver = val;
9237 BNX2X_DEV_INFO("bc_ver %X\n", val);
9238 if (val < BNX2X_BC_VER) {
9239 /* for now only warn
9240 * later we might need to enforce this */
9241 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9242 "please upgrade BC\n", BNX2X_BC_VER, val);
9244 bp->link_params.feature_config_flags |=
9245 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9246 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9248 if (BP_E1HVN(bp) == 0) {
9249 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9250 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9252 /* no WOL capability for E1HVN != 0 */
9253 bp->flags |= NO_WOL_FLAG;
9255 BNX2X_DEV_INFO("%sWoL capable\n",
9256 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9258 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9259 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9260 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9261 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9263 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9264 val, val2, val3, val4);
9267 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9270 int port = BP_PORT(bp);
9273 switch (switch_cfg) {
9275 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9278 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9279 switch (ext_phy_type) {
9280 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9281 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9284 bp->port.supported |= (SUPPORTED_10baseT_Half |
9285 SUPPORTED_10baseT_Full |
9286 SUPPORTED_100baseT_Half |
9287 SUPPORTED_100baseT_Full |
9288 SUPPORTED_1000baseT_Full |
9289 SUPPORTED_2500baseX_Full |
9294 SUPPORTED_Asym_Pause);
9297 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9298 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9301 bp->port.supported |= (SUPPORTED_10baseT_Half |
9302 SUPPORTED_10baseT_Full |
9303 SUPPORTED_100baseT_Half |
9304 SUPPORTED_100baseT_Full |
9305 SUPPORTED_1000baseT_Full |
9310 SUPPORTED_Asym_Pause);
9314 BNX2X_ERR("NVRAM config error. "
9315 "BAD SerDes ext_phy_config 0x%x\n",
9316 bp->link_params.ext_phy_config);
9320 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9322 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9325 case SWITCH_CFG_10G:
9326 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9329 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9330 switch (ext_phy_type) {
9331 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9332 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9335 bp->port.supported |= (SUPPORTED_10baseT_Half |
9336 SUPPORTED_10baseT_Full |
9337 SUPPORTED_100baseT_Half |
9338 SUPPORTED_100baseT_Full |
9339 SUPPORTED_1000baseT_Full |
9340 SUPPORTED_2500baseX_Full |
9341 SUPPORTED_10000baseT_Full |
9346 SUPPORTED_Asym_Pause);
9349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9350 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9353 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9354 SUPPORTED_1000baseT_Full |
9358 SUPPORTED_Asym_Pause);
9361 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9362 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9365 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9366 SUPPORTED_2500baseX_Full |
9367 SUPPORTED_1000baseT_Full |
9371 SUPPORTED_Asym_Pause);
9374 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9375 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9378 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9381 SUPPORTED_Asym_Pause);
9384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9385 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9388 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9389 SUPPORTED_1000baseT_Full |
9392 SUPPORTED_Asym_Pause);
9395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9396 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9399 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9400 SUPPORTED_1000baseT_Full |
9404 SUPPORTED_Asym_Pause);
9407 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9408 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9411 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9412 SUPPORTED_1000baseT_Full |
9416 SUPPORTED_Asym_Pause);
9419 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9420 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9423 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9427 SUPPORTED_Asym_Pause);
9430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9431 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9434 bp->port.supported |= (SUPPORTED_10baseT_Half |
9435 SUPPORTED_10baseT_Full |
9436 SUPPORTED_100baseT_Half |
9437 SUPPORTED_100baseT_Full |
9438 SUPPORTED_1000baseT_Full |
9439 SUPPORTED_10000baseT_Full |
9443 SUPPORTED_Asym_Pause);
9446 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9447 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9448 bp->link_params.ext_phy_config);
9452 BNX2X_ERR("NVRAM config error. "
9453 "BAD XGXS ext_phy_config 0x%x\n",
9454 bp->link_params.ext_phy_config);
9458 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9460 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9465 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9466 bp->port.link_config);
9469 bp->link_params.phy_addr = bp->port.phy_addr;
9471 /* mask what we support according to speed_cap_mask */
9472 if (!(bp->link_params.speed_cap_mask &
9473 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9474 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9476 if (!(bp->link_params.speed_cap_mask &
9477 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9478 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9480 if (!(bp->link_params.speed_cap_mask &
9481 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9482 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9484 if (!(bp->link_params.speed_cap_mask &
9485 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9486 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9488 if (!(bp->link_params.speed_cap_mask &
9489 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9490 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9491 SUPPORTED_1000baseT_Full);
9493 if (!(bp->link_params.speed_cap_mask &
9494 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9495 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9497 if (!(bp->link_params.speed_cap_mask &
9498 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9499 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9501 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9504 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9506 bp->link_params.req_duplex = DUPLEX_FULL;
9508 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9509 case PORT_FEATURE_LINK_SPEED_AUTO:
9510 if (bp->port.supported & SUPPORTED_Autoneg) {
9511 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9512 bp->port.advertising = bp->port.supported;
9515 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9517 if ((ext_phy_type ==
9518 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9520 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9521 /* force 10G, no AN */
9522 bp->link_params.req_line_speed = SPEED_10000;
9523 bp->port.advertising =
9524 (ADVERTISED_10000baseT_Full |
9528 BNX2X_ERR("NVRAM config error. "
9529 "Invalid link_config 0x%x"
9530 " Autoneg not supported\n",
9531 bp->port.link_config);
9536 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9537 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9538 bp->link_params.req_line_speed = SPEED_10;
9539 bp->port.advertising = (ADVERTISED_10baseT_Full |
9542 BNX2X_ERROR("NVRAM config error. "
9543 "Invalid link_config 0x%x"
9544 " speed_cap_mask 0x%x\n",
9545 bp->port.link_config,
9546 bp->link_params.speed_cap_mask);
9551 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9552 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9553 bp->link_params.req_line_speed = SPEED_10;
9554 bp->link_params.req_duplex = DUPLEX_HALF;
9555 bp->port.advertising = (ADVERTISED_10baseT_Half |
9558 BNX2X_ERROR("NVRAM config error. "
9559 "Invalid link_config 0x%x"
9560 " speed_cap_mask 0x%x\n",
9561 bp->port.link_config,
9562 bp->link_params.speed_cap_mask);
9567 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9568 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9569 bp->link_params.req_line_speed = SPEED_100;
9570 bp->port.advertising = (ADVERTISED_100baseT_Full |
9573 BNX2X_ERROR("NVRAM config error. "
9574 "Invalid link_config 0x%x"
9575 " speed_cap_mask 0x%x\n",
9576 bp->port.link_config,
9577 bp->link_params.speed_cap_mask);
9582 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9583 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9584 bp->link_params.req_line_speed = SPEED_100;
9585 bp->link_params.req_duplex = DUPLEX_HALF;
9586 bp->port.advertising = (ADVERTISED_100baseT_Half |
9589 BNX2X_ERROR("NVRAM config error. "
9590 "Invalid link_config 0x%x"
9591 " speed_cap_mask 0x%x\n",
9592 bp->port.link_config,
9593 bp->link_params.speed_cap_mask);
9598 case PORT_FEATURE_LINK_SPEED_1G:
9599 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9600 bp->link_params.req_line_speed = SPEED_1000;
9601 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9604 BNX2X_ERROR("NVRAM config error. "
9605 "Invalid link_config 0x%x"
9606 " speed_cap_mask 0x%x\n",
9607 bp->port.link_config,
9608 bp->link_params.speed_cap_mask);
9613 case PORT_FEATURE_LINK_SPEED_2_5G:
9614 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9615 bp->link_params.req_line_speed = SPEED_2500;
9616 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9619 BNX2X_ERROR("NVRAM config error. "
9620 "Invalid link_config 0x%x"
9621 " speed_cap_mask 0x%x\n",
9622 bp->port.link_config,
9623 bp->link_params.speed_cap_mask);
9628 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9629 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9630 case PORT_FEATURE_LINK_SPEED_10G_KR:
9631 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9632 bp->link_params.req_line_speed = SPEED_10000;
9633 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9636 BNX2X_ERROR("NVRAM config error. "
9637 "Invalid link_config 0x%x"
9638 " speed_cap_mask 0x%x\n",
9639 bp->port.link_config,
9640 bp->link_params.speed_cap_mask);
9646 BNX2X_ERROR("NVRAM config error. "
9647 "BAD link speed link_config 0x%x\n",
9648 bp->port.link_config);
9649 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9650 bp->port.advertising = bp->port.supported;
9654 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9655 PORT_FEATURE_FLOW_CONTROL_MASK);
9656 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9657 !(bp->port.supported & SUPPORTED_Autoneg))
9658 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9660 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
9661 " advertising 0x%x\n",
9662 bp->link_params.req_line_speed,
9663 bp->link_params.req_duplex,
9664 bp->link_params.req_flow_ctrl, bp->port.advertising);
9667 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9669 mac_hi = cpu_to_be16(mac_hi);
9670 mac_lo = cpu_to_be32(mac_lo);
9671 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9672 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9675 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9677 int port = BP_PORT(bp);
9683 bp->link_params.bp = bp;
9684 bp->link_params.port = port;
9686 bp->link_params.lane_config =
9687 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9688 bp->link_params.ext_phy_config =
9690 dev_info.port_hw_config[port].external_phy_config);
9691 /* BCM8727_NOC => BCM8727 no over current */
9692 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9693 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9694 bp->link_params.ext_phy_config &=
9695 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9696 bp->link_params.ext_phy_config |=
9697 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9698 bp->link_params.feature_config_flags |=
9699 FEATURE_CONFIG_BCM8727_NOC;
9702 bp->link_params.speed_cap_mask =
9704 dev_info.port_hw_config[port].speed_capability_mask);
9706 bp->port.link_config =
9707 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9709 /* Get the 4 lanes xgxs config rx and tx */
9710 for (i = 0; i < 2; i++) {
9712 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9713 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9714 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9717 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9718 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9719 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9722 /* If the device is capable of WoL, set the default state according
9725 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9726 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9727 (config & PORT_FEATURE_WOL_ENABLED));
9729 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9730 " speed_cap_mask 0x%08x link_config 0x%08x\n",
9731 bp->link_params.lane_config,
9732 bp->link_params.ext_phy_config,
9733 bp->link_params.speed_cap_mask, bp->port.link_config);
9735 bp->link_params.switch_cfg |= (bp->port.link_config &
9736 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9737 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9739 bnx2x_link_settings_requested(bp);
9742 * If connected directly, work with the internal PHY, otherwise, work
9743 * with the external PHY
9745 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9746 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9747 bp->mdio.prtad = bp->link_params.phy_addr;
9749 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9750 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9752 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9754 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9755 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9756 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9757 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9758 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9761 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9762 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9763 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9767 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9769 int func = BP_FUNC(bp);
9773 bnx2x_get_common_hwinfo(bp);
9777 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9779 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9781 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9782 FUNC_MF_CFG_E1HOV_TAG_MASK);
9783 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9785 BNX2X_DEV_INFO("%s function mode\n",
9786 IS_E1HMF(bp) ? "multi" : "single");
9789 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9791 FUNC_MF_CFG_E1HOV_TAG_MASK);
9792 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9794 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9796 func, bp->e1hov, bp->e1hov);
9798 BNX2X_ERROR("No valid E1HOV for func %d,"
9799 " aborting\n", func);
9804 BNX2X_ERROR("VN %d in single function mode,"
9805 " aborting\n", BP_E1HVN(bp));
9811 if (!BP_NOMCP(bp)) {
9812 bnx2x_get_port_hwinfo(bp);
9814 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9815 DRV_MSG_SEQ_NUMBER_MASK);
9816 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9820 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9821 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9822 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9823 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9824 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9825 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9826 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9827 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9828 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9829 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9830 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9832 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9840 /* only supposed to happen on emulation/FPGA */
9841 BNX2X_ERROR("warning: random MAC workaround active\n");
9842 random_ether_addr(bp->dev->dev_addr);
9843 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9849 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9851 int cnt, i, block_end, rodi;
9852 char vpd_data[BNX2X_VPD_LEN+1];
9853 char str_id_reg[VENDOR_ID_LEN+1];
9854 char str_id_cap[VENDOR_ID_LEN+1];
9857 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9858 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9860 if (cnt < BNX2X_VPD_LEN)
9863 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9864 PCI_VPD_LRDT_RO_DATA);
9869 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9870 pci_vpd_lrdt_size(&vpd_data[i]);
9872 i += PCI_VPD_LRDT_TAG_SIZE;
9874 if (block_end > BNX2X_VPD_LEN)
9877 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9878 PCI_VPD_RO_KEYWORD_MFR_ID);
9882 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9884 if (len != VENDOR_ID_LEN)
9887 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9889 /* vendor specific info */
9890 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9891 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9892 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9893 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9895 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9896 PCI_VPD_RO_KEYWORD_VENDOR0);
9898 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9900 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9902 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9903 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9904 bp->fw_ver[len] = ' ';
9913 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9915 int func = BP_FUNC(bp);
9919 /* Disable interrupt handling until HW is initialized */
9920 atomic_set(&bp->intr_sem, 1);
9921 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9923 mutex_init(&bp->port.phy_mutex);
9924 mutex_init(&bp->fw_mb_mutex);
9926 mutex_init(&bp->cnic_mutex);
9929 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9930 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9932 rc = bnx2x_get_hwinfo(bp);
9934 bnx2x_read_fwinfo(bp);
9935 /* need to reset chip if undi was active */
9937 bnx2x_undi_unload(bp);
9939 if (CHIP_REV_IS_FPGA(bp))
9940 dev_err(&bp->pdev->dev, "FPGA detected\n");
9942 if (BP_NOMCP(bp) && (func == 0))
9943 dev_err(&bp->pdev->dev, "MCP disabled, "
9944 "must load devices in order!\n");
9946 /* Set multi queue mode */
9947 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9948 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9949 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9950 "requested is not MSI-X\n");
9951 multi_mode = ETH_RSS_MODE_DISABLED;
9953 bp->multi_mode = multi_mode;
9954 bp->int_mode = int_mode;
9956 bp->dev->features |= NETIF_F_GRO;
9960 bp->flags &= ~TPA_ENABLE_FLAG;
9961 bp->dev->features &= ~NETIF_F_LRO;
9963 bp->flags |= TPA_ENABLE_FLAG;
9964 bp->dev->features |= NETIF_F_LRO;
9966 bp->disable_tpa = disable_tpa;
9969 bp->dropless_fc = 0;
9971 bp->dropless_fc = dropless_fc;
9975 bp->tx_ring_size = MAX_TX_AVAIL;
9976 bp->rx_ring_size = MAX_RX_AVAIL;
9980 /* make sure that the numbers are in the right granularity */
9981 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9982 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9984 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9985 bp->current_interval = (poll ? poll : timer_interval);
9987 init_timer(&bp->timer);
9988 bp->timer.expires = jiffies + bp->current_interval;
9989 bp->timer.data = (unsigned long) bp;
9990 bp->timer.function = bnx2x_timer;
9996 * ethtool service functions
9999 /* All ethtool functions called with rtnl_lock */
10001 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10003 struct bnx2x *bp = netdev_priv(dev);
10005 cmd->supported = bp->port.supported;
10006 cmd->advertising = bp->port.advertising;
10008 if ((bp->state == BNX2X_STATE_OPEN) &&
10009 !(bp->flags & MF_FUNC_DIS) &&
10010 (bp->link_vars.link_up)) {
10011 cmd->speed = bp->link_vars.line_speed;
10012 cmd->duplex = bp->link_vars.duplex;
10013 if (IS_E1HMF(bp)) {
10017 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10018 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10019 if (vn_max_rate < cmd->speed)
10020 cmd->speed = vn_max_rate;
10027 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10029 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10031 switch (ext_phy_type) {
10032 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10033 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10034 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10036 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10037 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10038 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10039 cmd->port = PORT_FIBRE;
10042 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10043 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10044 cmd->port = PORT_TP;
10047 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10048 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10049 bp->link_params.ext_phy_config);
10053 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10054 bp->link_params.ext_phy_config);
10058 cmd->port = PORT_TP;
10060 cmd->phy_address = bp->mdio.prtad;
10061 cmd->transceiver = XCVR_INTERNAL;
10063 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10064 cmd->autoneg = AUTONEG_ENABLE;
10066 cmd->autoneg = AUTONEG_DISABLE;
10071 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10072 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10073 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10074 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10075 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10076 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10077 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10082 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10084 struct bnx2x *bp = netdev_priv(dev);
10090 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10091 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10092 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10093 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10094 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10095 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10096 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10098 if (cmd->autoneg == AUTONEG_ENABLE) {
10099 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10100 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10104 /* advertise the requested speed and duplex if supported */
10105 cmd->advertising &= bp->port.supported;
10107 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10108 bp->link_params.req_duplex = DUPLEX_FULL;
10109 bp->port.advertising |= (ADVERTISED_Autoneg |
10112 } else { /* forced speed */
10113 /* advertise the requested speed and duplex if supported */
10114 switch (cmd->speed) {
10116 if (cmd->duplex == DUPLEX_FULL) {
10117 if (!(bp->port.supported &
10118 SUPPORTED_10baseT_Full)) {
10120 "10M full not supported\n");
10124 advertising = (ADVERTISED_10baseT_Full |
10127 if (!(bp->port.supported &
10128 SUPPORTED_10baseT_Half)) {
10130 "10M half not supported\n");
10134 advertising = (ADVERTISED_10baseT_Half |
10140 if (cmd->duplex == DUPLEX_FULL) {
10141 if (!(bp->port.supported &
10142 SUPPORTED_100baseT_Full)) {
10144 "100M full not supported\n");
10148 advertising = (ADVERTISED_100baseT_Full |
10151 if (!(bp->port.supported &
10152 SUPPORTED_100baseT_Half)) {
10154 "100M half not supported\n");
10158 advertising = (ADVERTISED_100baseT_Half |
10164 if (cmd->duplex != DUPLEX_FULL) {
10165 DP(NETIF_MSG_LINK, "1G half not supported\n");
10169 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10170 DP(NETIF_MSG_LINK, "1G full not supported\n");
10174 advertising = (ADVERTISED_1000baseT_Full |
10179 if (cmd->duplex != DUPLEX_FULL) {
10181 "2.5G half not supported\n");
10185 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10187 "2.5G full not supported\n");
10191 advertising = (ADVERTISED_2500baseX_Full |
10196 if (cmd->duplex != DUPLEX_FULL) {
10197 DP(NETIF_MSG_LINK, "10G half not supported\n");
10201 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10202 DP(NETIF_MSG_LINK, "10G full not supported\n");
10206 advertising = (ADVERTISED_10000baseT_Full |
10211 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10215 bp->link_params.req_line_speed = cmd->speed;
10216 bp->link_params.req_duplex = cmd->duplex;
10217 bp->port.advertising = advertising;
10220 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10221 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10222 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10223 bp->port.advertising);
10225 if (netif_running(dev)) {
10226 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10227 bnx2x_link_set(bp);
10233 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10234 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10236 static int bnx2x_get_regs_len(struct net_device *dev)
10238 struct bnx2x *bp = netdev_priv(dev);
10239 int regdump_len = 0;
10242 if (CHIP_IS_E1(bp)) {
10243 for (i = 0; i < REGS_COUNT; i++)
10244 if (IS_E1_ONLINE(reg_addrs[i].info))
10245 regdump_len += reg_addrs[i].size;
10247 for (i = 0; i < WREGS_COUNT_E1; i++)
10248 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10249 regdump_len += wreg_addrs_e1[i].size *
10250 (1 + wreg_addrs_e1[i].read_regs_count);
10253 for (i = 0; i < REGS_COUNT; i++)
10254 if (IS_E1H_ONLINE(reg_addrs[i].info))
10255 regdump_len += reg_addrs[i].size;
10257 for (i = 0; i < WREGS_COUNT_E1H; i++)
10258 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10259 regdump_len += wreg_addrs_e1h[i].size *
10260 (1 + wreg_addrs_e1h[i].read_regs_count);
10263 regdump_len += sizeof(struct dump_hdr);
10265 return regdump_len;
10268 static void bnx2x_get_regs(struct net_device *dev,
10269 struct ethtool_regs *regs, void *_p)
10272 struct bnx2x *bp = netdev_priv(dev);
10273 struct dump_hdr dump_hdr = {0};
10276 memset(p, 0, regs->len);
10278 if (!netif_running(bp->dev))
10281 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10282 dump_hdr.dump_sign = dump_sign_all;
10283 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10284 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10285 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10286 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10287 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10289 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10290 p += dump_hdr.hdr_size + 1;
10292 if (CHIP_IS_E1(bp)) {
10293 for (i = 0; i < REGS_COUNT; i++)
10294 if (IS_E1_ONLINE(reg_addrs[i].info))
10295 for (j = 0; j < reg_addrs[i].size; j++)
10297 reg_addrs[i].addr + j*4);
10300 for (i = 0; i < REGS_COUNT; i++)
10301 if (IS_E1H_ONLINE(reg_addrs[i].info))
10302 for (j = 0; j < reg_addrs[i].size; j++)
10304 reg_addrs[i].addr + j*4);
10308 #define PHY_FW_VER_LEN 10
10310 static void bnx2x_get_drvinfo(struct net_device *dev,
10311 struct ethtool_drvinfo *info)
10313 struct bnx2x *bp = netdev_priv(dev);
10314 u8 phy_fw_ver[PHY_FW_VER_LEN];
10316 strcpy(info->driver, DRV_MODULE_NAME);
10317 strcpy(info->version, DRV_MODULE_VERSION);
10319 phy_fw_ver[0] = '\0';
10320 if (bp->port.pmf) {
10321 bnx2x_acquire_phy_lock(bp);
10322 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10323 (bp->state != BNX2X_STATE_CLOSED),
10324 phy_fw_ver, PHY_FW_VER_LEN);
10325 bnx2x_release_phy_lock(bp);
10328 strncpy(info->fw_version, bp->fw_ver, 32);
10329 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10331 (bp->common.bc_ver & 0xff0000) >> 16,
10332 (bp->common.bc_ver & 0xff00) >> 8,
10333 (bp->common.bc_ver & 0xff),
10334 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10335 strcpy(info->bus_info, pci_name(bp->pdev));
10336 info->n_stats = BNX2X_NUM_STATS;
10337 info->testinfo_len = BNX2X_NUM_TESTS;
10338 info->eedump_len = bp->common.flash_size;
10339 info->regdump_len = bnx2x_get_regs_len(dev);
10342 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10344 struct bnx2x *bp = netdev_priv(dev);
10346 if (bp->flags & NO_WOL_FLAG) {
10347 wol->supported = 0;
10350 wol->supported = WAKE_MAGIC;
10352 wol->wolopts = WAKE_MAGIC;
10356 memset(&wol->sopass, 0, sizeof(wol->sopass));
10359 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10361 struct bnx2x *bp = netdev_priv(dev);
10363 if (wol->wolopts & ~WAKE_MAGIC)
10366 if (wol->wolopts & WAKE_MAGIC) {
10367 if (bp->flags & NO_WOL_FLAG)
10377 static u32 bnx2x_get_msglevel(struct net_device *dev)
10379 struct bnx2x *bp = netdev_priv(dev);
10381 return bp->msg_enable;
10384 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10386 struct bnx2x *bp = netdev_priv(dev);
10388 if (capable(CAP_NET_ADMIN))
10389 bp->msg_enable = level;
10392 static int bnx2x_nway_reset(struct net_device *dev)
10394 struct bnx2x *bp = netdev_priv(dev);
10399 if (netif_running(dev)) {
10400 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10401 bnx2x_link_set(bp);
10407 static u32 bnx2x_get_link(struct net_device *dev)
10409 struct bnx2x *bp = netdev_priv(dev);
10411 if (bp->flags & MF_FUNC_DIS)
10414 return bp->link_vars.link_up;
10417 static int bnx2x_get_eeprom_len(struct net_device *dev)
10419 struct bnx2x *bp = netdev_priv(dev);
10421 return bp->common.flash_size;
10424 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10426 int port = BP_PORT(bp);
10430 /* adjust timeout for emulation/FPGA */
10431 count = NVRAM_TIMEOUT_COUNT;
10432 if (CHIP_REV_IS_SLOW(bp))
10435 /* request access to nvram interface */
10436 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10437 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10439 for (i = 0; i < count*10; i++) {
10440 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10441 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10447 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10448 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10455 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10457 int port = BP_PORT(bp);
10461 /* adjust timeout for emulation/FPGA */
10462 count = NVRAM_TIMEOUT_COUNT;
10463 if (CHIP_REV_IS_SLOW(bp))
10466 /* relinquish nvram interface */
10467 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10468 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10470 for (i = 0; i < count*10; i++) {
10471 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10472 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10478 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10479 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10486 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10490 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10492 /* enable both bits, even on read */
10493 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10494 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10495 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10498 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10502 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10504 /* disable both bits, even after read */
10505 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10506 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10507 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10510 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10516 /* build the command word */
10517 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10519 /* need to clear DONE bit separately */
10520 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10522 /* address of the NVRAM to read from */
10523 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10524 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10526 /* issue a read command */
10527 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10529 /* adjust timeout for emulation/FPGA */
10530 count = NVRAM_TIMEOUT_COUNT;
10531 if (CHIP_REV_IS_SLOW(bp))
10534 /* wait for completion */
10537 for (i = 0; i < count; i++) {
10539 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10541 if (val & MCPR_NVM_COMMAND_DONE) {
10542 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10543 /* we read nvram data in cpu order
10544 * but ethtool sees it as an array of bytes
10545 * converting to big-endian will do the work */
10546 *ret_val = cpu_to_be32(val);
10555 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10562 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10564 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10569 if (offset + buf_size > bp->common.flash_size) {
10570 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10571 " buf_size (0x%x) > flash_size (0x%x)\n",
10572 offset, buf_size, bp->common.flash_size);
10576 /* request access to nvram interface */
10577 rc = bnx2x_acquire_nvram_lock(bp);
10581 /* enable access to nvram interface */
10582 bnx2x_enable_nvram_access(bp);
10584 /* read the first word(s) */
10585 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10586 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10587 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10588 memcpy(ret_buf, &val, 4);
10590 /* advance to the next dword */
10591 offset += sizeof(u32);
10592 ret_buf += sizeof(u32);
10593 buf_size -= sizeof(u32);
10598 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10599 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10600 memcpy(ret_buf, &val, 4);
10603 /* disable access to nvram interface */
10604 bnx2x_disable_nvram_access(bp);
10605 bnx2x_release_nvram_lock(bp);
10610 static int bnx2x_get_eeprom(struct net_device *dev,
10611 struct ethtool_eeprom *eeprom, u8 *eebuf)
10613 struct bnx2x *bp = netdev_priv(dev);
10616 if (!netif_running(dev))
10619 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10620 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10621 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10622 eeprom->len, eeprom->len);
10624 /* parameters already validated in ethtool_get_eeprom */
10626 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10631 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10636 /* build the command word */
10637 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10639 /* need to clear DONE bit separately */
10640 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10642 /* write the data */
10643 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10645 /* address of the NVRAM to write to */
10646 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10647 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10649 /* issue the write command */
10650 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10652 /* adjust timeout for emulation/FPGA */
10653 count = NVRAM_TIMEOUT_COUNT;
10654 if (CHIP_REV_IS_SLOW(bp))
10657 /* wait for completion */
10659 for (i = 0; i < count; i++) {
10661 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10662 if (val & MCPR_NVM_COMMAND_DONE) {
10671 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
10673 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10681 if (offset + buf_size > bp->common.flash_size) {
10682 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10683 " buf_size (0x%x) > flash_size (0x%x)\n",
10684 offset, buf_size, bp->common.flash_size);
10688 /* request access to nvram interface */
10689 rc = bnx2x_acquire_nvram_lock(bp);
10693 /* enable access to nvram interface */
10694 bnx2x_enable_nvram_access(bp);
10696 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10697 align_offset = (offset & ~0x03);
10698 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10701 val &= ~(0xff << BYTE_OFFSET(offset));
10702 val |= (*data_buf << BYTE_OFFSET(offset));
10704 /* nvram data is returned as an array of bytes
10705 * convert it back to cpu order */
10706 val = be32_to_cpu(val);
10708 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10712 /* disable access to nvram interface */
10713 bnx2x_disable_nvram_access(bp);
10714 bnx2x_release_nvram_lock(bp);
10719 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10725 u32 written_so_far;
10727 if (buf_size == 1) /* ethtool */
10728 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10730 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10732 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10737 if (offset + buf_size > bp->common.flash_size) {
10738 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10739 " buf_size (0x%x) > flash_size (0x%x)\n",
10740 offset, buf_size, bp->common.flash_size);
10744 /* request access to nvram interface */
10745 rc = bnx2x_acquire_nvram_lock(bp);
10749 /* enable access to nvram interface */
10750 bnx2x_enable_nvram_access(bp);
10752 written_so_far = 0;
10753 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10754 while ((written_so_far < buf_size) && (rc == 0)) {
10755 if (written_so_far == (buf_size - sizeof(u32)))
10756 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10757 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10758 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10759 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10760 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10762 memcpy(&val, data_buf, 4);
10764 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10766 /* advance to the next dword */
10767 offset += sizeof(u32);
10768 data_buf += sizeof(u32);
10769 written_so_far += sizeof(u32);
10773 /* disable access to nvram interface */
10774 bnx2x_disable_nvram_access(bp);
10775 bnx2x_release_nvram_lock(bp);
10780 static int bnx2x_set_eeprom(struct net_device *dev,
10781 struct ethtool_eeprom *eeprom, u8 *eebuf)
10783 struct bnx2x *bp = netdev_priv(dev);
10784 int port = BP_PORT(bp);
10787 if (!netif_running(dev))
10790 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10791 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10792 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10793 eeprom->len, eeprom->len);
10795 /* parameters already validated in ethtool_set_eeprom */
10797 /* PHY eeprom can be accessed only by the PMF */
10798 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10802 if (eeprom->magic == 0x50485950) {
10803 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10804 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10806 bnx2x_acquire_phy_lock(bp);
10807 rc |= bnx2x_link_reset(&bp->link_params,
10808 &bp->link_vars, 0);
10809 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10810 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10811 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10812 MISC_REGISTERS_GPIO_HIGH, port);
10813 bnx2x_release_phy_lock(bp);
10814 bnx2x_link_report(bp);
10816 } else if (eeprom->magic == 0x50485952) {
10817 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10818 if (bp->state == BNX2X_STATE_OPEN) {
10819 bnx2x_acquire_phy_lock(bp);
10820 rc |= bnx2x_link_reset(&bp->link_params,
10821 &bp->link_vars, 1);
10823 rc |= bnx2x_phy_init(&bp->link_params,
10825 bnx2x_release_phy_lock(bp);
10826 bnx2x_calc_fc_adv(bp);
10828 } else if (eeprom->magic == 0x53985943) {
10829 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10830 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10831 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10833 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10835 /* DSP Remove Download Mode */
10836 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10837 MISC_REGISTERS_GPIO_LOW, port);
10839 bnx2x_acquire_phy_lock(bp);
10841 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10843 /* wait 0.5 sec to allow it to run */
10845 bnx2x_ext_phy_hw_reset(bp, port);
10847 bnx2x_release_phy_lock(bp);
10850 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10855 static int bnx2x_get_coalesce(struct net_device *dev,
10856 struct ethtool_coalesce *coal)
10858 struct bnx2x *bp = netdev_priv(dev);
10860 memset(coal, 0, sizeof(struct ethtool_coalesce));
10862 coal->rx_coalesce_usecs = bp->rx_ticks;
10863 coal->tx_coalesce_usecs = bp->tx_ticks;
10868 static int bnx2x_set_coalesce(struct net_device *dev,
10869 struct ethtool_coalesce *coal)
10871 struct bnx2x *bp = netdev_priv(dev);
10873 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10874 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10875 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10877 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10878 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10879 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10881 if (netif_running(dev))
10882 bnx2x_update_coalesce(bp);
10887 static void bnx2x_get_ringparam(struct net_device *dev,
10888 struct ethtool_ringparam *ering)
10890 struct bnx2x *bp = netdev_priv(dev);
10892 ering->rx_max_pending = MAX_RX_AVAIL;
10893 ering->rx_mini_max_pending = 0;
10894 ering->rx_jumbo_max_pending = 0;
10896 ering->rx_pending = bp->rx_ring_size;
10897 ering->rx_mini_pending = 0;
10898 ering->rx_jumbo_pending = 0;
10900 ering->tx_max_pending = MAX_TX_AVAIL;
10901 ering->tx_pending = bp->tx_ring_size;
10904 static int bnx2x_set_ringparam(struct net_device *dev,
10905 struct ethtool_ringparam *ering)
10907 struct bnx2x *bp = netdev_priv(dev);
10910 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10911 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10915 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10916 (ering->tx_pending > MAX_TX_AVAIL) ||
10917 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10920 bp->rx_ring_size = ering->rx_pending;
10921 bp->tx_ring_size = ering->tx_pending;
10923 if (netif_running(dev)) {
10924 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10925 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10931 static void bnx2x_get_pauseparam(struct net_device *dev,
10932 struct ethtool_pauseparam *epause)
10934 struct bnx2x *bp = netdev_priv(dev);
10936 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10937 BNX2X_FLOW_CTRL_AUTO) &&
10938 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10940 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10941 BNX2X_FLOW_CTRL_RX);
10942 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10943 BNX2X_FLOW_CTRL_TX);
10945 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10946 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10947 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10950 static int bnx2x_set_pauseparam(struct net_device *dev,
10951 struct ethtool_pauseparam *epause)
10953 struct bnx2x *bp = netdev_priv(dev);
10958 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10959 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10960 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10962 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10964 if (epause->rx_pause)
10965 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10967 if (epause->tx_pause)
10968 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10970 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10971 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10973 if (epause->autoneg) {
10974 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10975 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10979 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10980 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10984 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10986 if (netif_running(dev)) {
10987 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10988 bnx2x_link_set(bp);
10994 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10996 struct bnx2x *bp = netdev_priv(dev);
11000 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
11003 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11004 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11008 /* TPA requires Rx CSUM offloading */
11009 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
11010 if (!bp->disable_tpa) {
11011 if (!(dev->features & NETIF_F_LRO)) {
11012 dev->features |= NETIF_F_LRO;
11013 bp->flags |= TPA_ENABLE_FLAG;
11018 } else if (dev->features & NETIF_F_LRO) {
11019 dev->features &= ~NETIF_F_LRO;
11020 bp->flags &= ~TPA_ENABLE_FLAG;
11024 if (data & ETH_FLAG_RXHASH)
11025 dev->features |= NETIF_F_RXHASH;
11027 dev->features &= ~NETIF_F_RXHASH;
11029 if (changed && netif_running(dev)) {
11030 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11031 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11037 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11039 struct bnx2x *bp = netdev_priv(dev);
11041 return bp->rx_csum;
11044 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11046 struct bnx2x *bp = netdev_priv(dev);
11049 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11050 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11054 bp->rx_csum = data;
11056 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
11057 TPA'ed packets will be discarded due to wrong TCP CSUM */
11059 u32 flags = ethtool_op_get_flags(dev);
11061 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11067 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11070 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11071 dev->features |= NETIF_F_TSO6;
11073 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11074 dev->features &= ~NETIF_F_TSO6;
11080 static const struct {
11081 char string[ETH_GSTRING_LEN];
11082 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11083 { "register_test (offline)" },
11084 { "memory_test (offline)" },
11085 { "loopback_test (offline)" },
11086 { "nvram_test (online)" },
11087 { "interrupt_test (online)" },
11088 { "link_test (online)" },
11089 { "idle check (online)" }
11092 static int bnx2x_test_registers(struct bnx2x *bp)
11094 int idx, i, rc = -ENODEV;
11096 int port = BP_PORT(bp);
11097 static const struct {
11102 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11103 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11104 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11105 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11106 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11107 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11108 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11109 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11110 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11111 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11112 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11113 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11114 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11115 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11116 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11117 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11118 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11119 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
11120 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
11121 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11122 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
11123 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11124 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11125 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11126 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11127 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11128 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11129 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11130 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
11131 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11132 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
11133 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11134 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11135 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11136 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11137 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11138 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11140 { 0xffffffff, 0, 0x00000000 }
11143 if (!netif_running(bp->dev))
11146 /* Repeat the test twice:
11147 First by writing 0x00000000, second by writing 0xffffffff */
11148 for (idx = 0; idx < 2; idx++) {
11155 wr_val = 0xffffffff;
11159 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11160 u32 offset, mask, save_val, val;
11162 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11163 mask = reg_tbl[i].mask;
11165 save_val = REG_RD(bp, offset);
11167 REG_WR(bp, offset, (wr_val & mask));
11168 val = REG_RD(bp, offset);
11170 /* Restore the original register's value */
11171 REG_WR(bp, offset, save_val);
11173 /* verify value is as expected */
11174 if ((val & mask) != (wr_val & mask)) {
11175 DP(NETIF_MSG_PROBE,
11176 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11177 offset, val, wr_val, mask);
11178 goto test_reg_exit;
11189 static int bnx2x_test_memory(struct bnx2x *bp)
11191 int i, j, rc = -ENODEV;
11193 static const struct {
11197 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11198 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11199 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11200 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11201 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11202 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11203 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11207 static const struct {
11213 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11214 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11215 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11216 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11217 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11218 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11220 { NULL, 0xffffffff, 0, 0 }
11223 if (!netif_running(bp->dev))
11226 /* Go through all the memories */
11227 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11228 for (j = 0; j < mem_tbl[i].size; j++)
11229 REG_RD(bp, mem_tbl[i].offset + j*4);
11231 /* Check the parity status */
11232 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11233 val = REG_RD(bp, prty_tbl[i].offset);
11234 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11235 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11237 "%s is 0x%x\n", prty_tbl[i].name, val);
11238 goto test_mem_exit;
11248 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11253 while (bnx2x_link_test(bp) && cnt--)
11257 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11259 unsigned int pkt_size, num_pkts, i;
11260 struct sk_buff *skb;
11261 unsigned char *packet;
11262 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11263 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11264 u16 tx_start_idx, tx_idx;
11265 u16 rx_start_idx, rx_idx;
11266 u16 pkt_prod, bd_prod;
11267 struct sw_tx_bd *tx_buf;
11268 struct eth_tx_start_bd *tx_start_bd;
11269 struct eth_tx_parse_bd *pbd = NULL;
11270 dma_addr_t mapping;
11271 union eth_rx_cqe *cqe;
11273 struct sw_rx_bd *rx_buf;
11277 /* check the loopback mode */
11278 switch (loopback_mode) {
11279 case BNX2X_PHY_LOOPBACK:
11280 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11283 case BNX2X_MAC_LOOPBACK:
11284 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11285 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11291 /* prepare the loopback packet */
11292 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11293 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11294 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11297 goto test_loopback_exit;
11299 packet = skb_put(skb, pkt_size);
11300 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11301 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11302 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11303 for (i = ETH_HLEN; i < pkt_size; i++)
11304 packet[i] = (unsigned char) (i & 0xff);
11306 /* send the loopback packet */
11308 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11309 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11311 pkt_prod = fp_tx->tx_pkt_prod++;
11312 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11313 tx_buf->first_bd = fp_tx->tx_bd_prod;
11317 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11318 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11319 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11320 skb_headlen(skb), DMA_TO_DEVICE);
11321 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11322 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11323 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11324 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11325 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11326 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11327 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11328 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11330 /* turn on parsing and get a BD */
11331 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11332 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11334 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11338 fp_tx->tx_db.data.prod += 2;
11340 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11345 fp_tx->tx_bd_prod += 2; /* start + pbd */
11349 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11350 if (tx_idx != tx_start_idx + num_pkts)
11351 goto test_loopback_exit;
11353 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11354 if (rx_idx != rx_start_idx + num_pkts)
11355 goto test_loopback_exit;
11357 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11358 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11359 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11360 goto test_loopback_rx_exit;
11362 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11363 if (len != pkt_size)
11364 goto test_loopback_rx_exit;
11366 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11368 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11369 for (i = ETH_HLEN; i < pkt_size; i++)
11370 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11371 goto test_loopback_rx_exit;
11375 test_loopback_rx_exit:
11377 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11378 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11379 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11380 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11382 /* Update producers */
11383 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11384 fp_rx->rx_sge_prod);
11386 test_loopback_exit:
11387 bp->link_params.loopback_mode = LOOPBACK_NONE;
11392 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11399 if (!netif_running(bp->dev))
11400 return BNX2X_LOOPBACK_FAILED;
11402 bnx2x_netif_stop(bp, 1);
11403 bnx2x_acquire_phy_lock(bp);
11405 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11407 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11408 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11411 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11413 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11414 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11417 bnx2x_release_phy_lock(bp);
11418 bnx2x_netif_start(bp);
11423 #define CRC32_RESIDUAL 0xdebb20e3
11425 static int bnx2x_test_nvram(struct bnx2x *bp)
11427 static const struct {
11431 { 0, 0x14 }, /* bootstrap */
11432 { 0x14, 0xec }, /* dir */
11433 { 0x100, 0x350 }, /* manuf_info */
11434 { 0x450, 0xf0 }, /* feature_info */
11435 { 0x640, 0x64 }, /* upgrade_key_info */
11437 { 0x708, 0x70 }, /* manuf_key_info */
11441 __be32 buf[0x350 / 4];
11442 u8 *data = (u8 *)buf;
11449 rc = bnx2x_nvram_read(bp, 0, data, 4);
11451 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11452 goto test_nvram_exit;
11455 magic = be32_to_cpu(buf[0]);
11456 if (magic != 0x669955aa) {
11457 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11459 goto test_nvram_exit;
11462 for (i = 0; nvram_tbl[i].size; i++) {
11464 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11465 nvram_tbl[i].size);
11467 DP(NETIF_MSG_PROBE,
11468 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11469 goto test_nvram_exit;
11472 crc = ether_crc_le(nvram_tbl[i].size, data);
11473 if (crc != CRC32_RESIDUAL) {
11474 DP(NETIF_MSG_PROBE,
11475 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11477 goto test_nvram_exit;
11485 static int bnx2x_test_intr(struct bnx2x *bp)
11487 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11490 if (!netif_running(bp->dev))
11493 config->hdr.length = 0;
11494 if (CHIP_IS_E1(bp))
11495 /* use last unicast entries */
11496 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11498 config->hdr.offset = BP_FUNC(bp);
11499 config->hdr.client_id = bp->fp->cl_id;
11500 config->hdr.reserved1 = 0;
11502 bp->set_mac_pending++;
11504 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11505 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11506 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11508 for (i = 0; i < 10; i++) {
11509 if (!bp->set_mac_pending)
11512 msleep_interruptible(10);
11521 static void bnx2x_self_test(struct net_device *dev,
11522 struct ethtool_test *etest, u64 *buf)
11524 struct bnx2x *bp = netdev_priv(dev);
11526 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11527 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11528 etest->flags |= ETH_TEST_FL_FAILED;
11532 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11534 if (!netif_running(dev))
11537 /* offline tests are not supported in MF mode */
11539 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11541 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11542 int port = BP_PORT(bp);
11546 /* save current value of input enable for TX port IF */
11547 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11548 /* disable input for TX port IF */
11549 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11551 link_up = (bnx2x_link_test(bp) == 0);
11552 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11553 bnx2x_nic_load(bp, LOAD_DIAG);
11554 /* wait until link state is restored */
11555 bnx2x_wait_for_link(bp, link_up);
11557 if (bnx2x_test_registers(bp) != 0) {
11559 etest->flags |= ETH_TEST_FL_FAILED;
11561 if (bnx2x_test_memory(bp) != 0) {
11563 etest->flags |= ETH_TEST_FL_FAILED;
11565 buf[2] = bnx2x_test_loopback(bp, link_up);
11567 etest->flags |= ETH_TEST_FL_FAILED;
11569 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11571 /* restore input for TX port IF */
11572 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11574 bnx2x_nic_load(bp, LOAD_NORMAL);
11575 /* wait until link state is restored */
11576 bnx2x_wait_for_link(bp, link_up);
11578 if (bnx2x_test_nvram(bp) != 0) {
11580 etest->flags |= ETH_TEST_FL_FAILED;
11582 if (bnx2x_test_intr(bp) != 0) {
11584 etest->flags |= ETH_TEST_FL_FAILED;
11587 if (bnx2x_link_test(bp) != 0) {
11589 etest->flags |= ETH_TEST_FL_FAILED;
11592 #ifdef BNX2X_EXTRA_DEBUG
11593 bnx2x_panic_dump(bp);
11597 static const struct {
11600 u8 string[ETH_GSTRING_LEN];
11601 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11602 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11603 { Q_STATS_OFFSET32(error_bytes_received_hi),
11604 8, "[%d]: rx_error_bytes" },
11605 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11606 8, "[%d]: rx_ucast_packets" },
11607 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11608 8, "[%d]: rx_mcast_packets" },
11609 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11610 8, "[%d]: rx_bcast_packets" },
11611 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11612 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11613 4, "[%d]: rx_phy_ip_err_discards"},
11614 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11615 4, "[%d]: rx_skb_alloc_discard" },
11616 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11618 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11619 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11620 8, "[%d]: tx_ucast_packets" },
11621 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11622 8, "[%d]: tx_mcast_packets" },
11623 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11624 8, "[%d]: tx_bcast_packets" }
11627 static const struct {
11631 #define STATS_FLAGS_PORT 1
11632 #define STATS_FLAGS_FUNC 2
11633 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11634 u8 string[ETH_GSTRING_LEN];
11635 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11636 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11637 8, STATS_FLAGS_BOTH, "rx_bytes" },
11638 { STATS_OFFSET32(error_bytes_received_hi),
11639 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11640 { STATS_OFFSET32(total_unicast_packets_received_hi),
11641 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11642 { STATS_OFFSET32(total_multicast_packets_received_hi),
11643 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11644 { STATS_OFFSET32(total_broadcast_packets_received_hi),
11645 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11646 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11647 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11648 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11649 8, STATS_FLAGS_PORT, "rx_align_errors" },
11650 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11651 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11652 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11653 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11654 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11655 8, STATS_FLAGS_PORT, "rx_fragments" },
11656 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11657 8, STATS_FLAGS_PORT, "rx_jabbers" },
11658 { STATS_OFFSET32(no_buff_discard_hi),
11659 8, STATS_FLAGS_BOTH, "rx_discards" },
11660 { STATS_OFFSET32(mac_filter_discard),
11661 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11662 { STATS_OFFSET32(xxoverflow_discard),
11663 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11664 { STATS_OFFSET32(brb_drop_hi),
11665 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11666 { STATS_OFFSET32(brb_truncate_hi),
11667 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11668 { STATS_OFFSET32(pause_frames_received_hi),
11669 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11670 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11671 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11672 { STATS_OFFSET32(nig_timer_max),
11673 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11674 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11675 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11676 { STATS_OFFSET32(rx_skb_alloc_failed),
11677 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11678 { STATS_OFFSET32(hw_csum_err),
11679 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11681 { STATS_OFFSET32(total_bytes_transmitted_hi),
11682 8, STATS_FLAGS_BOTH, "tx_bytes" },
11683 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11684 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11685 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11686 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11687 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11688 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11689 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11690 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11691 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11692 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11693 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11694 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11695 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11696 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11697 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11698 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11699 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11700 8, STATS_FLAGS_PORT, "tx_deferred" },
11701 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11702 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11703 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11704 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11705 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11706 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11707 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11708 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11709 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11710 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11711 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11712 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11713 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11714 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11715 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11716 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11717 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11718 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11719 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11720 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11721 { STATS_OFFSET32(pause_frames_sent_hi),
11722 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11725 #define IS_PORT_STAT(i) \
11726 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11727 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11728 #define IS_E1HMF_MODE_STAT(bp) \
11729 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11731 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11733 struct bnx2x *bp = netdev_priv(dev);
11736 switch (stringset) {
11738 if (is_multi(bp)) {
11739 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11740 if (!IS_E1HMF_MODE_STAT(bp))
11741 num_stats += BNX2X_NUM_STATS;
11743 if (IS_E1HMF_MODE_STAT(bp)) {
11745 for (i = 0; i < BNX2X_NUM_STATS; i++)
11746 if (IS_FUNC_STAT(i))
11749 num_stats = BNX2X_NUM_STATS;
11754 return BNX2X_NUM_TESTS;
11761 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11763 struct bnx2x *bp = netdev_priv(dev);
11766 switch (stringset) {
11768 if (is_multi(bp)) {
11770 for_each_queue(bp, i) {
11771 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11772 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11773 bnx2x_q_stats_arr[j].string, i);
11774 k += BNX2X_NUM_Q_STATS;
11776 if (IS_E1HMF_MODE_STAT(bp))
11778 for (j = 0; j < BNX2X_NUM_STATS; j++)
11779 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11780 bnx2x_stats_arr[j].string);
11782 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11783 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11785 strcpy(buf + j*ETH_GSTRING_LEN,
11786 bnx2x_stats_arr[i].string);
11793 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11798 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11799 struct ethtool_stats *stats, u64 *buf)
11801 struct bnx2x *bp = netdev_priv(dev);
11802 u32 *hw_stats, *offset;
11805 if (is_multi(bp)) {
11807 for_each_queue(bp, i) {
11808 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11809 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11810 if (bnx2x_q_stats_arr[j].size == 0) {
11811 /* skip this counter */
11815 offset = (hw_stats +
11816 bnx2x_q_stats_arr[j].offset);
11817 if (bnx2x_q_stats_arr[j].size == 4) {
11818 /* 4-byte counter */
11819 buf[k + j] = (u64) *offset;
11822 /* 8-byte counter */
11823 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11825 k += BNX2X_NUM_Q_STATS;
11827 if (IS_E1HMF_MODE_STAT(bp))
11829 hw_stats = (u32 *)&bp->eth_stats;
11830 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11831 if (bnx2x_stats_arr[j].size == 0) {
11832 /* skip this counter */
11836 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11837 if (bnx2x_stats_arr[j].size == 4) {
11838 /* 4-byte counter */
11839 buf[k + j] = (u64) *offset;
11842 /* 8-byte counter */
11843 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11846 hw_stats = (u32 *)&bp->eth_stats;
11847 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11848 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11850 if (bnx2x_stats_arr[i].size == 0) {
11851 /* skip this counter */
11856 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11857 if (bnx2x_stats_arr[i].size == 4) {
11858 /* 4-byte counter */
11859 buf[j] = (u64) *offset;
11863 /* 8-byte counter */
11864 buf[j] = HILO_U64(*offset, *(offset + 1));
11870 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11872 struct bnx2x *bp = netdev_priv(dev);
11875 if (!netif_running(dev))
11884 for (i = 0; i < (data * 2); i++) {
11886 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11889 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11891 msleep_interruptible(500);
11892 if (signal_pending(current))
11896 if (bp->link_vars.link_up)
11897 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11898 bp->link_vars.line_speed);
11903 static const struct ethtool_ops bnx2x_ethtool_ops = {
11904 .get_settings = bnx2x_get_settings,
11905 .set_settings = bnx2x_set_settings,
11906 .get_drvinfo = bnx2x_get_drvinfo,
11907 .get_regs_len = bnx2x_get_regs_len,
11908 .get_regs = bnx2x_get_regs,
11909 .get_wol = bnx2x_get_wol,
11910 .set_wol = bnx2x_set_wol,
11911 .get_msglevel = bnx2x_get_msglevel,
11912 .set_msglevel = bnx2x_set_msglevel,
11913 .nway_reset = bnx2x_nway_reset,
11914 .get_link = bnx2x_get_link,
11915 .get_eeprom_len = bnx2x_get_eeprom_len,
11916 .get_eeprom = bnx2x_get_eeprom,
11917 .set_eeprom = bnx2x_set_eeprom,
11918 .get_coalesce = bnx2x_get_coalesce,
11919 .set_coalesce = bnx2x_set_coalesce,
11920 .get_ringparam = bnx2x_get_ringparam,
11921 .set_ringparam = bnx2x_set_ringparam,
11922 .get_pauseparam = bnx2x_get_pauseparam,
11923 .set_pauseparam = bnx2x_set_pauseparam,
11924 .get_rx_csum = bnx2x_get_rx_csum,
11925 .set_rx_csum = bnx2x_set_rx_csum,
11926 .get_tx_csum = ethtool_op_get_tx_csum,
11927 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11928 .set_flags = bnx2x_set_flags,
11929 .get_flags = ethtool_op_get_flags,
11930 .get_sg = ethtool_op_get_sg,
11931 .set_sg = ethtool_op_set_sg,
11932 .get_tso = ethtool_op_get_tso,
11933 .set_tso = bnx2x_set_tso,
11934 .self_test = bnx2x_self_test,
11935 .get_sset_count = bnx2x_get_sset_count,
11936 .get_strings = bnx2x_get_strings,
11937 .phys_id = bnx2x_phys_id,
11938 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11941 /* end of ethtool_ops */
11943 /****************************************************************************
11944 * General service functions
11945 ****************************************************************************/
11947 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11951 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11955 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11956 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11957 PCI_PM_CTRL_PME_STATUS));
11959 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11960 /* delay required during transition out of D3hot */
11965 /* If there are other clients above don't
11966 shut down the power */
11967 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11969 /* Don't shut down the power for emulation and FPGA */
11970 if (CHIP_REV_IS_SLOW(bp))
11973 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11977 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11979 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11982 /* No more memory access after this point until
11983 * device is brought back to D0.
11993 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11997 /* Tell compiler that status block fields can change */
11999 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
12000 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
12002 return (fp->rx_comp_cons != rx_cons_sb);
12006 * net_device service functions
12009 static int bnx2x_poll(struct napi_struct *napi, int budget)
12012 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12014 struct bnx2x *bp = fp->bp;
12017 #ifdef BNX2X_STOP_ON_ERROR
12018 if (unlikely(bp->panic)) {
12019 napi_complete(napi);
12024 if (bnx2x_has_tx_work(fp))
12027 if (bnx2x_has_rx_work(fp)) {
12028 work_done += bnx2x_rx_int(fp, budget - work_done);
12030 /* must not complete if we consumed full budget */
12031 if (work_done >= budget)
12035 /* Fall out from the NAPI loop if needed */
12036 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12037 bnx2x_update_fpsb_idx(fp);
12038 /* bnx2x_has_rx_work() reads the status block, thus we need
12039 * to ensure that status block indices have been actually read
12040 * (bnx2x_update_fpsb_idx) prior to this check
12041 * (bnx2x_has_rx_work) so that we won't write the "newer"
12042 * value of the status block to IGU (if there was a DMA right
12043 * after bnx2x_has_rx_work and if there is no rmb, the memory
12044 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12045 * before bnx2x_ack_sb). In this case there will never be
12046 * another interrupt until there is another update of the
12047 * status block, while there is still unhandled work.
12051 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12052 napi_complete(napi);
12053 /* Re-enable interrupts */
12054 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12055 le16_to_cpu(fp->fp_c_idx),
12057 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12058 le16_to_cpu(fp->fp_u_idx),
12059 IGU_INT_ENABLE, 1);
12069 /* we split the first BD into headers and data BDs
12070 * to ease the pain of our fellow microcode engineers
12071 * we use one mapping for both BDs
12072 * So far this has only been observed to happen
12073 * in Other Operating Systems(TM)
12075 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12076 struct bnx2x_fastpath *fp,
12077 struct sw_tx_bd *tx_buf,
12078 struct eth_tx_start_bd **tx_bd, u16 hlen,
12079 u16 bd_prod, int nbd)
12081 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12082 struct eth_tx_bd *d_tx_bd;
12083 dma_addr_t mapping;
12084 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12086 /* first fix first BD */
12087 h_tx_bd->nbd = cpu_to_le16(nbd);
12088 h_tx_bd->nbytes = cpu_to_le16(hlen);
12090 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12091 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12092 h_tx_bd->addr_lo, h_tx_bd->nbd);
12094 /* now get a new data BD
12095 * (after the pbd) and fill it */
12096 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12097 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12099 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12100 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12102 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12103 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12104 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12106 /* this marks the BD as one that has no individual mapping */
12107 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12109 DP(NETIF_MSG_TX_QUEUED,
12110 "TSO split data size is %d (%x:%x)\n",
12111 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12114 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12119 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12122 csum = (u16) ~csum_fold(csum_sub(csum,
12123 csum_partial(t_header - fix, fix, 0)));
12126 csum = (u16) ~csum_fold(csum_add(csum,
12127 csum_partial(t_header, -fix, 0)));
12129 return swab16(csum);
12132 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12136 if (skb->ip_summed != CHECKSUM_PARTIAL)
12140 if (skb->protocol == htons(ETH_P_IPV6)) {
12142 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12143 rc |= XMIT_CSUM_TCP;
12147 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12148 rc |= XMIT_CSUM_TCP;
12152 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12153 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12155 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12156 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12161 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12162 /* check if packet requires linearization (packet is too fragmented)
12163 no need to check fragmentation if page size > 8K (there will be no
12164 violation to FW restrictions) */
12165 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12170 int first_bd_sz = 0;
12172 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12173 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12175 if (xmit_type & XMIT_GSO) {
12176 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12177 /* Check if LSO packet needs to be copied:
12178 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12179 int wnd_size = MAX_FETCH_BD - 3;
12180 /* Number of windows to check */
12181 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12186 /* Headers length */
12187 hlen = (int)(skb_transport_header(skb) - skb->data) +
12190 /* Amount of data (w/o headers) on linear part of SKB*/
12191 first_bd_sz = skb_headlen(skb) - hlen;
12193 wnd_sum = first_bd_sz;
12195 /* Calculate the first sum - it's special */
12196 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12198 skb_shinfo(skb)->frags[frag_idx].size;
12200 /* If there was data on linear skb data - check it */
12201 if (first_bd_sz > 0) {
12202 if (unlikely(wnd_sum < lso_mss)) {
12207 wnd_sum -= first_bd_sz;
12210 /* Others are easier: run through the frag list and
12211 check all windows */
12212 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12214 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12216 if (unlikely(wnd_sum < lso_mss)) {
12221 skb_shinfo(skb)->frags[wnd_idx].size;
12224 /* in non-LSO too fragmented packet should always
12231 if (unlikely(to_copy))
12232 DP(NETIF_MSG_TX_QUEUED,
12233 "Linearization IS REQUIRED for %s packet. "
12234 "num_frags %d hlen %d first_bd_sz %d\n",
12235 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12236 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12242 /* called with netif_tx_lock
12243 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12244 * netif_wake_queue()
12246 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12248 struct bnx2x *bp = netdev_priv(dev);
12249 struct bnx2x_fastpath *fp;
12250 struct netdev_queue *txq;
12251 struct sw_tx_bd *tx_buf;
12252 struct eth_tx_start_bd *tx_start_bd;
12253 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12254 struct eth_tx_parse_bd *pbd = NULL;
12255 u16 pkt_prod, bd_prod;
12257 dma_addr_t mapping;
12258 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12261 __le16 pkt_size = 0;
12262 struct ethhdr *eth;
12263 u8 mac_type = UNICAST_ADDRESS;
12265 #ifdef BNX2X_STOP_ON_ERROR
12266 if (unlikely(bp->panic))
12267 return NETDEV_TX_BUSY;
12270 fp_index = skb_get_queue_mapping(skb);
12271 txq = netdev_get_tx_queue(dev, fp_index);
12273 fp = &bp->fp[fp_index];
12275 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12276 fp->eth_q_stats.driver_xoff++;
12277 netif_tx_stop_queue(txq);
12278 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12279 return NETDEV_TX_BUSY;
12282 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12283 " gso type %x xmit_type %x\n",
12284 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12285 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12287 eth = (struct ethhdr *)skb->data;
12289 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12290 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12291 if (is_broadcast_ether_addr(eth->h_dest))
12292 mac_type = BROADCAST_ADDRESS;
12294 mac_type = MULTICAST_ADDRESS;
12297 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12298 /* First, check if we need to linearize the skb (due to FW
12299 restrictions). No need to check fragmentation if page size > 8K
12300 (there will be no violation to FW restrictions) */
12301 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12302 /* Statistics of linearization */
12304 if (skb_linearize(skb) != 0) {
12305 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12306 "silently dropping this SKB\n");
12307 dev_kfree_skb_any(skb);
12308 return NETDEV_TX_OK;
12314 Please read carefully. First we use one BD which we mark as start,
12315 then we have a parsing info BD (used for TSO or xsum),
12316 and only then we have the rest of the TSO BDs.
12317 (don't forget to mark the last one as last,
12318 and to unmap only AFTER you write to the BD ...)
12319 And above all, all pdb sizes are in words - NOT DWORDS!
12322 pkt_prod = fp->tx_pkt_prod++;
12323 bd_prod = TX_BD(fp->tx_bd_prod);
12325 /* get a tx_buf and first BD */
12326 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12327 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12329 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12330 tx_start_bd->general_data = (mac_type <<
12331 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12333 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12335 /* remember the first BD of the packet */
12336 tx_buf->first_bd = fp->tx_bd_prod;
12340 DP(NETIF_MSG_TX_QUEUED,
12341 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12342 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12345 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12346 (bp->flags & HW_VLAN_TX_FLAG)) {
12347 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12348 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12351 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12353 /* turn on parsing and get a BD */
12354 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12355 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12357 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12359 if (xmit_type & XMIT_CSUM) {
12360 hlen = (skb_network_header(skb) - skb->data) / 2;
12362 /* for now NS flag is not used in Linux */
12364 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12365 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12367 pbd->ip_hlen = (skb_transport_header(skb) -
12368 skb_network_header(skb)) / 2;
12370 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12372 pbd->total_hlen = cpu_to_le16(hlen);
12375 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12377 if (xmit_type & XMIT_CSUM_V4)
12378 tx_start_bd->bd_flags.as_bitfield |=
12379 ETH_TX_BD_FLAGS_IP_CSUM;
12381 tx_start_bd->bd_flags.as_bitfield |=
12382 ETH_TX_BD_FLAGS_IPV6;
12384 if (xmit_type & XMIT_CSUM_TCP) {
12385 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12388 s8 fix = SKB_CS_OFF(skb); /* signed! */
12390 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12392 DP(NETIF_MSG_TX_QUEUED,
12393 "hlen %d fix %d csum before fix %x\n",
12394 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12396 /* HW bug: fixup the CSUM */
12397 pbd->tcp_pseudo_csum =
12398 bnx2x_csum_fix(skb_transport_header(skb),
12401 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12402 pbd->tcp_pseudo_csum);
12406 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12407 skb_headlen(skb), DMA_TO_DEVICE);
12409 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12410 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12411 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12412 tx_start_bd->nbd = cpu_to_le16(nbd);
12413 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12414 pkt_size = tx_start_bd->nbytes;
12416 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12417 " nbytes %d flags %x vlan %x\n",
12418 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12419 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12420 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12422 if (xmit_type & XMIT_GSO) {
12424 DP(NETIF_MSG_TX_QUEUED,
12425 "TSO packet len %d hlen %d total len %d tso size %d\n",
12426 skb->len, hlen, skb_headlen(skb),
12427 skb_shinfo(skb)->gso_size);
12429 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12431 if (unlikely(skb_headlen(skb) > hlen))
12432 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12433 hlen, bd_prod, ++nbd);
12435 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12436 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12437 pbd->tcp_flags = pbd_tcp_flags(skb);
12439 if (xmit_type & XMIT_GSO_V4) {
12440 pbd->ip_id = swab16(ip_hdr(skb)->id);
12441 pbd->tcp_pseudo_csum =
12442 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12443 ip_hdr(skb)->daddr,
12444 0, IPPROTO_TCP, 0));
12447 pbd->tcp_pseudo_csum =
12448 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12449 &ipv6_hdr(skb)->daddr,
12450 0, IPPROTO_TCP, 0));
12452 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12454 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12457 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12459 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12460 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12461 if (total_pkt_bd == NULL)
12462 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12464 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12466 frag->size, DMA_TO_DEVICE);
12468 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12469 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12470 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12471 le16_add_cpu(&pkt_size, frag->size);
12473 DP(NETIF_MSG_TX_QUEUED,
12474 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12475 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12476 le16_to_cpu(tx_data_bd->nbytes));
12479 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12481 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12483 /* now send a tx doorbell, counting the next BD
12484 * if the packet contains or ends with it
12486 if (TX_BD_POFF(bd_prod) < nbd)
12489 if (total_pkt_bd != NULL)
12490 total_pkt_bd->total_pkt_bytes = pkt_size;
12493 DP(NETIF_MSG_TX_QUEUED,
12494 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12495 " tcp_flags %x xsum %x seq %u hlen %u\n",
12496 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12497 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12498 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12500 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12503 * Make sure that the BD data is updated before updating the producer
12504 * since FW might read the BD right after the producer is updated.
12505 * This is only applicable for weak-ordered memory model archs such
12506 * as IA-64. The following barrier is also mandatory since FW will
12507 * assumes packets must have BDs.
12511 fp->tx_db.data.prod += nbd;
12513 DOORBELL(bp, fp->index, fp->tx_db.raw);
12517 fp->tx_bd_prod += nbd;
12519 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12520 netif_tx_stop_queue(txq);
12522 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12523 * ordering of set_bit() in netif_tx_stop_queue() and read of
12524 * fp->bd_tx_cons */
12527 fp->eth_q_stats.driver_xoff++;
12528 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12529 netif_tx_wake_queue(txq);
12533 return NETDEV_TX_OK;
12536 /* called with rtnl_lock */
12537 static int bnx2x_open(struct net_device *dev)
12539 struct bnx2x *bp = netdev_priv(dev);
12541 netif_carrier_off(dev);
12543 bnx2x_set_power_state(bp, PCI_D0);
12545 if (!bnx2x_reset_is_done(bp)) {
12547 /* Reset MCP mail box sequence if there is on going
12552 /* If it's the first function to load and reset done
12553 * is still not cleared it may mean that. We don't
12554 * check the attention state here because it may have
12555 * already been cleared by a "common" reset but we
12556 * shell proceed with "process kill" anyway.
12558 if ((bnx2x_get_load_cnt(bp) == 0) &&
12559 bnx2x_trylock_hw_lock(bp,
12560 HW_LOCK_RESOURCE_RESERVED_08) &&
12561 (!bnx2x_leader_reset(bp))) {
12562 DP(NETIF_MSG_HW, "Recovered in open\n");
12566 bnx2x_set_power_state(bp, PCI_D3hot);
12568 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12569 " completed yet. Try again later. If u still see this"
12570 " message after a few retries then power cycle is"
12571 " required.\n", bp->dev->name);
12577 bp->recovery_state = BNX2X_RECOVERY_DONE;
12579 return bnx2x_nic_load(bp, LOAD_OPEN);
12582 /* called with rtnl_lock */
12583 static int bnx2x_close(struct net_device *dev)
12585 struct bnx2x *bp = netdev_priv(dev);
12587 /* Unload the driver, release IRQs */
12588 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12589 bnx2x_set_power_state(bp, PCI_D3hot);
12594 /* called with netif_tx_lock from dev_mcast.c */
12595 static void bnx2x_set_rx_mode(struct net_device *dev)
12597 struct bnx2x *bp = netdev_priv(dev);
12598 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12599 int port = BP_PORT(bp);
12601 if (bp->state != BNX2X_STATE_OPEN) {
12602 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12606 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12608 if (dev->flags & IFF_PROMISC)
12609 rx_mode = BNX2X_RX_MODE_PROMISC;
12611 else if ((dev->flags & IFF_ALLMULTI) ||
12612 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12614 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12616 else { /* some multicasts */
12617 if (CHIP_IS_E1(bp)) {
12618 int i, old, offset;
12619 struct netdev_hw_addr *ha;
12620 struct mac_configuration_cmd *config =
12621 bnx2x_sp(bp, mcast_config);
12624 netdev_for_each_mc_addr(ha, dev) {
12625 config->config_table[i].
12626 cam_entry.msb_mac_addr =
12627 swab16(*(u16 *)&ha->addr[0]);
12628 config->config_table[i].
12629 cam_entry.middle_mac_addr =
12630 swab16(*(u16 *)&ha->addr[2]);
12631 config->config_table[i].
12632 cam_entry.lsb_mac_addr =
12633 swab16(*(u16 *)&ha->addr[4]);
12634 config->config_table[i].cam_entry.flags =
12636 config->config_table[i].
12637 target_table_entry.flags = 0;
12638 config->config_table[i].target_table_entry.
12639 clients_bit_vector =
12640 cpu_to_le32(1 << BP_L_ID(bp));
12641 config->config_table[i].
12642 target_table_entry.vlan_id = 0;
12645 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12646 config->config_table[i].
12647 cam_entry.msb_mac_addr,
12648 config->config_table[i].
12649 cam_entry.middle_mac_addr,
12650 config->config_table[i].
12651 cam_entry.lsb_mac_addr);
12654 old = config->hdr.length;
12656 for (; i < old; i++) {
12657 if (CAM_IS_INVALID(config->
12658 config_table[i])) {
12659 /* already invalidated */
12663 CAM_INVALIDATE(config->
12668 if (CHIP_REV_IS_SLOW(bp))
12669 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12671 offset = BNX2X_MAX_MULTICAST*(1 + port);
12673 config->hdr.length = i;
12674 config->hdr.offset = offset;
12675 config->hdr.client_id = bp->fp->cl_id;
12676 config->hdr.reserved1 = 0;
12678 bp->set_mac_pending++;
12681 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12682 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12683 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12686 /* Accept one or more multicasts */
12687 struct netdev_hw_addr *ha;
12688 u32 mc_filter[MC_HASH_SIZE];
12689 u32 crc, bit, regidx;
12692 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12694 netdev_for_each_mc_addr(ha, dev) {
12695 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12698 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12699 bit = (crc >> 24) & 0xff;
12702 mc_filter[regidx] |= (1 << bit);
12705 for (i = 0; i < MC_HASH_SIZE; i++)
12706 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12711 bp->rx_mode = rx_mode;
12712 bnx2x_set_storm_rx_mode(bp);
12715 /* called with rtnl_lock */
12716 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12718 struct sockaddr *addr = p;
12719 struct bnx2x *bp = netdev_priv(dev);
12721 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12724 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12725 if (netif_running(dev)) {
12726 if (CHIP_IS_E1(bp))
12727 bnx2x_set_eth_mac_addr_e1(bp, 1);
12729 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12735 /* called with rtnl_lock */
12736 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12737 int devad, u16 addr)
12739 struct bnx2x *bp = netdev_priv(netdev);
12742 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12744 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12745 prtad, devad, addr);
12747 if (prtad != bp->mdio.prtad) {
12748 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12749 prtad, bp->mdio.prtad);
12753 /* The HW expects different devad if CL22 is used */
12754 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12756 bnx2x_acquire_phy_lock(bp);
12757 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12758 devad, addr, &value);
12759 bnx2x_release_phy_lock(bp);
12760 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12767 /* called with rtnl_lock */
12768 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12769 u16 addr, u16 value)
12771 struct bnx2x *bp = netdev_priv(netdev);
12772 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12775 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12776 " value 0x%x\n", prtad, devad, addr, value);
12778 if (prtad != bp->mdio.prtad) {
12779 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12780 prtad, bp->mdio.prtad);
12784 /* The HW expects different devad if CL22 is used */
12785 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12787 bnx2x_acquire_phy_lock(bp);
12788 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12789 devad, addr, value);
12790 bnx2x_release_phy_lock(bp);
12794 /* called with rtnl_lock */
12795 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12797 struct bnx2x *bp = netdev_priv(dev);
12798 struct mii_ioctl_data *mdio = if_mii(ifr);
12800 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12801 mdio->phy_id, mdio->reg_num, mdio->val_in);
12803 if (!netif_running(dev))
12806 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12809 /* called with rtnl_lock */
12810 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12812 struct bnx2x *bp = netdev_priv(dev);
12815 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12816 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12820 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12821 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12824 /* This does not race with packet allocation
12825 * because the actual alloc size is
12826 * only updated as part of load
12828 dev->mtu = new_mtu;
12830 if (netif_running(dev)) {
12831 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12832 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12838 static void bnx2x_tx_timeout(struct net_device *dev)
12840 struct bnx2x *bp = netdev_priv(dev);
12842 #ifdef BNX2X_STOP_ON_ERROR
12846 /* This allows the netif to be shutdown gracefully before resetting */
12847 schedule_delayed_work(&bp->reset_task, 0);
12851 /* called with rtnl_lock */
12852 static void bnx2x_vlan_rx_register(struct net_device *dev,
12853 struct vlan_group *vlgrp)
12855 struct bnx2x *bp = netdev_priv(dev);
12859 /* Set flags according to the required capabilities */
12860 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12862 if (dev->features & NETIF_F_HW_VLAN_TX)
12863 bp->flags |= HW_VLAN_TX_FLAG;
12865 if (dev->features & NETIF_F_HW_VLAN_RX)
12866 bp->flags |= HW_VLAN_RX_FLAG;
12868 if (netif_running(dev))
12869 bnx2x_set_client_config(bp);
12874 #ifdef CONFIG_NET_POLL_CONTROLLER
12875 static void poll_bnx2x(struct net_device *dev)
12877 struct bnx2x *bp = netdev_priv(dev);
12879 disable_irq(bp->pdev->irq);
12880 bnx2x_interrupt(bp->pdev->irq, dev);
12881 enable_irq(bp->pdev->irq);
12885 static const struct net_device_ops bnx2x_netdev_ops = {
12886 .ndo_open = bnx2x_open,
12887 .ndo_stop = bnx2x_close,
12888 .ndo_start_xmit = bnx2x_start_xmit,
12889 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12890 .ndo_set_mac_address = bnx2x_change_mac_addr,
12891 .ndo_validate_addr = eth_validate_addr,
12892 .ndo_do_ioctl = bnx2x_ioctl,
12893 .ndo_change_mtu = bnx2x_change_mtu,
12894 .ndo_tx_timeout = bnx2x_tx_timeout,
12896 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12898 #ifdef CONFIG_NET_POLL_CONTROLLER
12899 .ndo_poll_controller = poll_bnx2x,
12903 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12904 struct net_device *dev)
12909 SET_NETDEV_DEV(dev, &pdev->dev);
12910 bp = netdev_priv(dev);
12915 bp->func = PCI_FUNC(pdev->devfn);
12917 rc = pci_enable_device(pdev);
12919 dev_err(&bp->pdev->dev,
12920 "Cannot enable PCI device, aborting\n");
12924 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12925 dev_err(&bp->pdev->dev,
12926 "Cannot find PCI device base address, aborting\n");
12928 goto err_out_disable;
12931 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12932 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12933 " base address, aborting\n");
12935 goto err_out_disable;
12938 if (atomic_read(&pdev->enable_cnt) == 1) {
12939 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12941 dev_err(&bp->pdev->dev,
12942 "Cannot obtain PCI resources, aborting\n");
12943 goto err_out_disable;
12946 pci_set_master(pdev);
12947 pci_save_state(pdev);
12950 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12951 if (bp->pm_cap == 0) {
12952 dev_err(&bp->pdev->dev,
12953 "Cannot find power management capability, aborting\n");
12955 goto err_out_release;
12958 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12959 if (bp->pcie_cap == 0) {
12960 dev_err(&bp->pdev->dev,
12961 "Cannot find PCI Express capability, aborting\n");
12963 goto err_out_release;
12966 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12967 bp->flags |= USING_DAC_FLAG;
12968 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12969 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12970 " failed, aborting\n");
12972 goto err_out_release;
12975 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12976 dev_err(&bp->pdev->dev,
12977 "System does not support DMA, aborting\n");
12979 goto err_out_release;
12982 dev->mem_start = pci_resource_start(pdev, 0);
12983 dev->base_addr = dev->mem_start;
12984 dev->mem_end = pci_resource_end(pdev, 0);
12986 dev->irq = pdev->irq;
12988 bp->regview = pci_ioremap_bar(pdev, 0);
12989 if (!bp->regview) {
12990 dev_err(&bp->pdev->dev,
12991 "Cannot map register space, aborting\n");
12993 goto err_out_release;
12996 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12997 min_t(u64, BNX2X_DB_SIZE,
12998 pci_resource_len(pdev, 2)));
12999 if (!bp->doorbells) {
13000 dev_err(&bp->pdev->dev,
13001 "Cannot map doorbell space, aborting\n");
13003 goto err_out_unmap;
13006 bnx2x_set_power_state(bp, PCI_D0);
13008 /* clean indirect addresses */
13009 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13010 PCICFG_VENDOR_ID_OFFSET);
13011 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
13012 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
13013 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
13014 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
13016 /* Reset the load counter */
13017 bnx2x_clear_load_cnt(bp);
13019 dev->watchdog_timeo = TX_TIMEOUT;
13021 dev->netdev_ops = &bnx2x_netdev_ops;
13022 dev->ethtool_ops = &bnx2x_ethtool_ops;
13023 dev->features |= NETIF_F_SG;
13024 dev->features |= NETIF_F_HW_CSUM;
13025 if (bp->flags & USING_DAC_FLAG)
13026 dev->features |= NETIF_F_HIGHDMA;
13027 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13028 dev->features |= NETIF_F_TSO6;
13030 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13031 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13033 dev->vlan_features |= NETIF_F_SG;
13034 dev->vlan_features |= NETIF_F_HW_CSUM;
13035 if (bp->flags & USING_DAC_FLAG)
13036 dev->vlan_features |= NETIF_F_HIGHDMA;
13037 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13038 dev->vlan_features |= NETIF_F_TSO6;
13041 /* get_port_hwinfo() will set prtad and mmds properly */
13042 bp->mdio.prtad = MDIO_PRTAD_NONE;
13044 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13045 bp->mdio.dev = dev;
13046 bp->mdio.mdio_read = bnx2x_mdio_read;
13047 bp->mdio.mdio_write = bnx2x_mdio_write;
13053 iounmap(bp->regview);
13054 bp->regview = NULL;
13056 if (bp->doorbells) {
13057 iounmap(bp->doorbells);
13058 bp->doorbells = NULL;
13062 if (atomic_read(&pdev->enable_cnt) == 1)
13063 pci_release_regions(pdev);
13066 pci_disable_device(pdev);
13067 pci_set_drvdata(pdev, NULL);
13073 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13074 int *width, int *speed)
13076 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13078 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13080 /* return value of 1=2.5GHz 2=5GHz */
13081 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13084 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13086 const struct firmware *firmware = bp->firmware;
13087 struct bnx2x_fw_file_hdr *fw_hdr;
13088 struct bnx2x_fw_file_section *sections;
13089 u32 offset, len, num_ops;
13094 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13097 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13098 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13100 /* Make sure none of the offsets and sizes make us read beyond
13101 * the end of the firmware data */
13102 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13103 offset = be32_to_cpu(sections[i].offset);
13104 len = be32_to_cpu(sections[i].len);
13105 if (offset + len > firmware->size) {
13106 dev_err(&bp->pdev->dev,
13107 "Section %d length is out of bounds\n", i);
13112 /* Likewise for the init_ops offsets */
13113 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13114 ops_offsets = (u16 *)(firmware->data + offset);
13115 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13117 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13118 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13119 dev_err(&bp->pdev->dev,
13120 "Section offset %d is out of bounds\n", i);
13125 /* Check FW version */
13126 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13127 fw_ver = firmware->data + offset;
13128 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13129 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13130 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13131 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13132 dev_err(&bp->pdev->dev,
13133 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13134 fw_ver[0], fw_ver[1], fw_ver[2],
13135 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13136 BCM_5710_FW_MINOR_VERSION,
13137 BCM_5710_FW_REVISION_VERSION,
13138 BCM_5710_FW_ENGINEERING_VERSION);
13145 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13147 const __be32 *source = (const __be32 *)_source;
13148 u32 *target = (u32 *)_target;
13151 for (i = 0; i < n/4; i++)
13152 target[i] = be32_to_cpu(source[i]);
13156 Ops array is stored in the following format:
13157 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13159 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13161 const __be32 *source = (const __be32 *)_source;
13162 struct raw_op *target = (struct raw_op *)_target;
13165 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13166 tmp = be32_to_cpu(source[j]);
13167 target[i].op = (tmp >> 24) & 0xff;
13168 target[i].offset = tmp & 0xffffff;
13169 target[i].raw_data = be32_to_cpu(source[j + 1]);
13173 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13175 const __be16 *source = (const __be16 *)_source;
13176 u16 *target = (u16 *)_target;
13179 for (i = 0; i < n/2; i++)
13180 target[i] = be16_to_cpu(source[i]);
13183 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13185 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13186 bp->arr = kmalloc(len, GFP_KERNEL); \
13188 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13191 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13192 (u8 *)bp->arr, len); \
13195 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13197 const char *fw_file_name;
13198 struct bnx2x_fw_file_hdr *fw_hdr;
13201 if (CHIP_IS_E1(bp))
13202 fw_file_name = FW_FILE_NAME_E1;
13203 else if (CHIP_IS_E1H(bp))
13204 fw_file_name = FW_FILE_NAME_E1H;
13206 dev_err(dev, "Unsupported chip revision\n");
13210 dev_info(dev, "Loading %s\n", fw_file_name);
13212 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13214 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13215 goto request_firmware_exit;
13218 rc = bnx2x_check_firmware(bp);
13220 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13221 goto request_firmware_exit;
13224 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13226 /* Initialize the pointers to the init arrays */
13228 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13231 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13234 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13237 /* STORMs firmware */
13238 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13239 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13240 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13241 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13242 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13243 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13244 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13245 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13246 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13247 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13248 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13249 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13250 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13251 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13252 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13253 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13257 init_offsets_alloc_err:
13258 kfree(bp->init_ops);
13259 init_ops_alloc_err:
13260 kfree(bp->init_data);
13261 request_firmware_exit:
13262 release_firmware(bp->firmware);
13268 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13269 const struct pci_device_id *ent)
13271 struct net_device *dev = NULL;
13273 int pcie_width, pcie_speed;
13276 /* dev zeroed in init_etherdev */
13277 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13279 dev_err(&pdev->dev, "Cannot allocate net device\n");
13283 bp = netdev_priv(dev);
13284 bp->msg_enable = debug;
13286 pci_set_drvdata(pdev, dev);
13288 rc = bnx2x_init_dev(pdev, dev);
13294 rc = bnx2x_init_bp(bp);
13296 goto init_one_exit;
13298 /* Set init arrays */
13299 rc = bnx2x_init_firmware(bp, &pdev->dev);
13301 dev_err(&pdev->dev, "Error loading firmware\n");
13302 goto init_one_exit;
13305 rc = register_netdev(dev);
13307 dev_err(&pdev->dev, "Cannot register net device\n");
13308 goto init_one_exit;
13311 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13312 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13313 " IRQ %d, ", board_info[ent->driver_data].name,
13314 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13315 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13316 dev->base_addr, bp->pdev->irq);
13317 pr_cont("node addr %pM\n", dev->dev_addr);
13323 iounmap(bp->regview);
13326 iounmap(bp->doorbells);
13330 if (atomic_read(&pdev->enable_cnt) == 1)
13331 pci_release_regions(pdev);
13333 pci_disable_device(pdev);
13334 pci_set_drvdata(pdev, NULL);
13339 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13341 struct net_device *dev = pci_get_drvdata(pdev);
13345 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13348 bp = netdev_priv(dev);
13350 unregister_netdev(dev);
13352 /* Make sure RESET task is not scheduled before continuing */
13353 cancel_delayed_work_sync(&bp->reset_task);
13355 kfree(bp->init_ops_offsets);
13356 kfree(bp->init_ops);
13357 kfree(bp->init_data);
13358 release_firmware(bp->firmware);
13361 iounmap(bp->regview);
13364 iounmap(bp->doorbells);
13368 if (atomic_read(&pdev->enable_cnt) == 1)
13369 pci_release_regions(pdev);
13371 pci_disable_device(pdev);
13372 pci_set_drvdata(pdev, NULL);
13375 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13377 struct net_device *dev = pci_get_drvdata(pdev);
13381 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13384 bp = netdev_priv(dev);
13388 pci_save_state(pdev);
13390 if (!netif_running(dev)) {
13395 netif_device_detach(dev);
13397 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13399 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13406 static int bnx2x_resume(struct pci_dev *pdev)
13408 struct net_device *dev = pci_get_drvdata(pdev);
13413 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13416 bp = netdev_priv(dev);
13418 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13419 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13425 pci_restore_state(pdev);
13427 if (!netif_running(dev)) {
13432 bnx2x_set_power_state(bp, PCI_D0);
13433 netif_device_attach(dev);
13435 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13442 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13446 bp->state = BNX2X_STATE_ERROR;
13448 bp->rx_mode = BNX2X_RX_MODE_NONE;
13450 bnx2x_netif_stop(bp, 0);
13451 netif_carrier_off(bp->dev);
13453 del_timer_sync(&bp->timer);
13454 bp->stats_state = STATS_STATE_DISABLED;
13455 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13458 bnx2x_free_irq(bp, false);
13460 if (CHIP_IS_E1(bp)) {
13461 struct mac_configuration_cmd *config =
13462 bnx2x_sp(bp, mcast_config);
13464 for (i = 0; i < config->hdr.length; i++)
13465 CAM_INVALIDATE(config->config_table[i]);
13468 /* Free SKBs, SGEs, TPA pool and driver internals */
13469 bnx2x_free_skbs(bp);
13470 for_each_queue(bp, i)
13471 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13472 for_each_queue(bp, i)
13473 netif_napi_del(&bnx2x_fp(bp, i, napi));
13474 bnx2x_free_mem(bp);
13476 bp->state = BNX2X_STATE_CLOSED;
13481 static void bnx2x_eeh_recover(struct bnx2x *bp)
13485 mutex_init(&bp->port.phy_mutex);
13487 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13488 bp->link_params.shmem_base = bp->common.shmem_base;
13489 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13491 if (!bp->common.shmem_base ||
13492 (bp->common.shmem_base < 0xA0000) ||
13493 (bp->common.shmem_base >= 0xC0000)) {
13494 BNX2X_DEV_INFO("MCP not active\n");
13495 bp->flags |= NO_MCP_FLAG;
13499 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13500 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13501 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13502 BNX2X_ERR("BAD MCP validity signature\n");
13504 if (!BP_NOMCP(bp)) {
13505 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13506 & DRV_MSG_SEQ_NUMBER_MASK);
13507 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13512 * bnx2x_io_error_detected - called when PCI error is detected
13513 * @pdev: Pointer to PCI device
13514 * @state: The current pci connection state
13516 * This function is called after a PCI bus error affecting
13517 * this device has been detected.
13519 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13520 pci_channel_state_t state)
13522 struct net_device *dev = pci_get_drvdata(pdev);
13523 struct bnx2x *bp = netdev_priv(dev);
13527 netif_device_detach(dev);
13529 if (state == pci_channel_io_perm_failure) {
13531 return PCI_ERS_RESULT_DISCONNECT;
13534 if (netif_running(dev))
13535 bnx2x_eeh_nic_unload(bp);
13537 pci_disable_device(pdev);
13541 /* Request a slot reset */
13542 return PCI_ERS_RESULT_NEED_RESET;
13546 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13547 * @pdev: Pointer to PCI device
13549 * Restart the card from scratch, as if from a cold-boot.
13551 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13553 struct net_device *dev = pci_get_drvdata(pdev);
13554 struct bnx2x *bp = netdev_priv(dev);
13558 if (pci_enable_device(pdev)) {
13559 dev_err(&pdev->dev,
13560 "Cannot re-enable PCI device after reset\n");
13562 return PCI_ERS_RESULT_DISCONNECT;
13565 pci_set_master(pdev);
13566 pci_restore_state(pdev);
13568 if (netif_running(dev))
13569 bnx2x_set_power_state(bp, PCI_D0);
13573 return PCI_ERS_RESULT_RECOVERED;
13577 * bnx2x_io_resume - called when traffic can start flowing again
13578 * @pdev: Pointer to PCI device
13580 * This callback is called when the error recovery driver tells us that
13581 * its OK to resume normal operation.
13583 static void bnx2x_io_resume(struct pci_dev *pdev)
13585 struct net_device *dev = pci_get_drvdata(pdev);
13586 struct bnx2x *bp = netdev_priv(dev);
13588 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13589 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13595 bnx2x_eeh_recover(bp);
13597 if (netif_running(dev))
13598 bnx2x_nic_load(bp, LOAD_NORMAL);
13600 netif_device_attach(dev);
13605 static struct pci_error_handlers bnx2x_err_handler = {
13606 .error_detected = bnx2x_io_error_detected,
13607 .slot_reset = bnx2x_io_slot_reset,
13608 .resume = bnx2x_io_resume,
13611 static struct pci_driver bnx2x_pci_driver = {
13612 .name = DRV_MODULE_NAME,
13613 .id_table = bnx2x_pci_tbl,
13614 .probe = bnx2x_init_one,
13615 .remove = __devexit_p(bnx2x_remove_one),
13616 .suspend = bnx2x_suspend,
13617 .resume = bnx2x_resume,
13618 .err_handler = &bnx2x_err_handler,
13621 static int __init bnx2x_init(void)
13625 pr_info("%s", version);
13627 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13628 if (bnx2x_wq == NULL) {
13629 pr_err("Cannot create workqueue\n");
13633 ret = pci_register_driver(&bnx2x_pci_driver);
13635 pr_err("Cannot register driver\n");
13636 destroy_workqueue(bnx2x_wq);
13641 static void __exit bnx2x_cleanup(void)
13643 pci_unregister_driver(&bnx2x_pci_driver);
13645 destroy_workqueue(bnx2x_wq);
13648 module_init(bnx2x_init);
13649 module_exit(bnx2x_cleanup);
13653 /* count denotes the number of new completions we have seen */
13654 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13656 struct eth_spe *spe;
13658 #ifdef BNX2X_STOP_ON_ERROR
13659 if (unlikely(bp->panic))
13663 spin_lock_bh(&bp->spq_lock);
13664 bp->cnic_spq_pending -= count;
13666 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13667 bp->cnic_spq_pending++) {
13669 if (!bp->cnic_kwq_pending)
13672 spe = bnx2x_sp_get_next(bp);
13673 *spe = *bp->cnic_kwq_cons;
13675 bp->cnic_kwq_pending--;
13677 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13678 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13680 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13681 bp->cnic_kwq_cons = bp->cnic_kwq;
13683 bp->cnic_kwq_cons++;
13685 bnx2x_sp_prod_update(bp);
13686 spin_unlock_bh(&bp->spq_lock);
13689 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13690 struct kwqe_16 *kwqes[], u32 count)
13692 struct bnx2x *bp = netdev_priv(dev);
13695 #ifdef BNX2X_STOP_ON_ERROR
13696 if (unlikely(bp->panic))
13700 spin_lock_bh(&bp->spq_lock);
13702 for (i = 0; i < count; i++) {
13703 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13705 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13708 *bp->cnic_kwq_prod = *spe;
13710 bp->cnic_kwq_pending++;
13712 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13713 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13714 spe->data.mac_config_addr.hi,
13715 spe->data.mac_config_addr.lo,
13716 bp->cnic_kwq_pending);
13718 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13719 bp->cnic_kwq_prod = bp->cnic_kwq;
13721 bp->cnic_kwq_prod++;
13724 spin_unlock_bh(&bp->spq_lock);
13726 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13727 bnx2x_cnic_sp_post(bp, 0);
13732 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13734 struct cnic_ops *c_ops;
13737 mutex_lock(&bp->cnic_mutex);
13738 c_ops = bp->cnic_ops;
13740 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13741 mutex_unlock(&bp->cnic_mutex);
13746 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13748 struct cnic_ops *c_ops;
13752 c_ops = rcu_dereference(bp->cnic_ops);
13754 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13761 * for commands that have no data
13763 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13765 struct cnic_ctl_info ctl = {0};
13769 return bnx2x_cnic_ctl_send(bp, &ctl);
13772 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13774 struct cnic_ctl_info ctl;
13776 /* first we tell CNIC and only then we count this as a completion */
13777 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13778 ctl.data.comp.cid = cid;
13780 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13781 bnx2x_cnic_sp_post(bp, 1);
13784 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13786 struct bnx2x *bp = netdev_priv(dev);
13789 switch (ctl->cmd) {
13790 case DRV_CTL_CTXTBL_WR_CMD: {
13791 u32 index = ctl->data.io.offset;
13792 dma_addr_t addr = ctl->data.io.dma_addr;
13794 bnx2x_ilt_wr(bp, index, addr);
13798 case DRV_CTL_COMPLETION_CMD: {
13799 int count = ctl->data.comp.comp_count;
13801 bnx2x_cnic_sp_post(bp, count);
13805 /* rtnl_lock is held. */
13806 case DRV_CTL_START_L2_CMD: {
13807 u32 cli = ctl->data.ring.client_id;
13809 bp->rx_mode_cl_mask |= (1 << cli);
13810 bnx2x_set_storm_rx_mode(bp);
13814 /* rtnl_lock is held. */
13815 case DRV_CTL_STOP_L2_CMD: {
13816 u32 cli = ctl->data.ring.client_id;
13818 bp->rx_mode_cl_mask &= ~(1 << cli);
13819 bnx2x_set_storm_rx_mode(bp);
13824 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13831 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13833 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13835 if (bp->flags & USING_MSIX_FLAG) {
13836 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13837 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13838 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13840 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13841 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13843 cp->irq_arr[0].status_blk = bp->cnic_sb;
13844 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13845 cp->irq_arr[1].status_blk = bp->def_status_blk;
13846 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13851 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13854 struct bnx2x *bp = netdev_priv(dev);
13855 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13860 if (atomic_read(&bp->intr_sem) != 0)
13863 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13867 bp->cnic_kwq_cons = bp->cnic_kwq;
13868 bp->cnic_kwq_prod = bp->cnic_kwq;
13869 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13871 bp->cnic_spq_pending = 0;
13872 bp->cnic_kwq_pending = 0;
13874 bp->cnic_data = data;
13877 cp->drv_state = CNIC_DRV_STATE_REGD;
13879 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13881 bnx2x_setup_cnic_irq_info(bp);
13882 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13883 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13884 rcu_assign_pointer(bp->cnic_ops, ops);
13889 static int bnx2x_unregister_cnic(struct net_device *dev)
13891 struct bnx2x *bp = netdev_priv(dev);
13892 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13894 mutex_lock(&bp->cnic_mutex);
13895 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13896 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13897 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13900 rcu_assign_pointer(bp->cnic_ops, NULL);
13901 mutex_unlock(&bp->cnic_mutex);
13903 kfree(bp->cnic_kwq);
13904 bp->cnic_kwq = NULL;
13909 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13911 struct bnx2x *bp = netdev_priv(dev);
13912 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13914 cp->drv_owner = THIS_MODULE;
13915 cp->chip_id = CHIP_ID(bp);
13916 cp->pdev = bp->pdev;
13917 cp->io_base = bp->regview;
13918 cp->io_base2 = bp->doorbells;
13919 cp->max_kwqe_pending = 8;
13920 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13921 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13922 cp->ctx_tbl_len = CNIC_ILT_LINES;
13923 cp->starting_cid = BCM_CNIC_CID_START;
13924 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13925 cp->drv_ctl = bnx2x_drv_ctl;
13926 cp->drv_register_cnic = bnx2x_register_cnic;
13927 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13931 EXPORT_SYMBOL(bnx2x_cnic_probe);
13933 #endif /* BCM_CNIC */