/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.53-1"
#define DRV_MODULE_RELDATE	"2010/04/18"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

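/* Editorial note: each entry below is the "GO" register of one of the 16
 * DMAE command channels; writing 1 to dmae_reg_go_c[idx] starts command idx
 * previously loaded by bnx2x_post_dmae().
 */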
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

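/* Editorial note: a typical caller of bnx2x_write_dmae() copies len32 dwords
 * from a DMA-coherent host buffer into GRC space, e.g. (names below are
 * illustrative only, not from this file):
 *
 *	bnx2x_write_dmae(bp, buf_mapping, dst_grc_addr, 4);
 *
 * Concurrent callers are serialized on bp->dmae_mutex, as seen above, and
 * completion is detected by polling the wb_comp word in the slowpath area.
 */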
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

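/* A single DMAE command can move at most DMAE_LEN32_WR_MAX(bp) dwords; the
 * helper below splits larger writes into maximal chunks and issues the
 * remainder last.
 */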
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}

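/* Scan the assert lists of the four STORM processors (X/T/C/U below) and
 * print every entry whose first word holds a valid assert opcode; the
 * return value counts the asserts found.
 */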
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

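/* Dump the MCP trace from the scratchpad: the word just below the shmem
 * base holds the current "mark", and the buffer is printed in two halves
 * wrapped around that mark.
 */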
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr, mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("\nend of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

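/* Editorial note: bnx2x_ack_sb() and bnx2x_ack_int() above drive the IGU
 * through the HC command registers; the first acknowledges one status block
 * (optionally re-enabling its interrupt), while the second reads the SIMD
 * mask of pending status blocks, which appears to double as the interrupt
 * acknowledge for the INTx/MSI handler further below.
 */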
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

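/* Worked example for bnx2x_tx_avail() below: with prod == cons the ring is
 * empty, so used == NUM_TX_RINGS (the reserved "next page" BDs) and the
 * function reports tx_ring_size - NUM_TX_RINGS usable descriptors.
 */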
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

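/* Each SGE entry above covers PAGES_PER_SGE pages of SGE_PAGE_SIZE bytes;
 * bnx2x_alloc_rx_skb() below is the linear-buffer counterpart, mapping a
 * bp->rx_buf_size skb and publishing its DMA address in the rx BD.
 */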
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

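/* TPA, the aggregation feature exposed through the disable_tpa parameter
 * above (LRO-style TCP aggregation): bnx2x_tpa_start() parks the
 * partially-filled skb in a per-queue bin and hands the hardware a fresh
 * buffer, while bnx2x_tpa_stop() later completes the aggregated packet,
 * fixing the IP checksum and attaching the SGE pages as fragments.
 */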
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

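/* MSI-X fastpath handler below: one vector per queue. It only masks the
 * queue's status block and schedules NAPI; the actual Rx/Tx work runs in
 * the NAPI poll routine (not part of this excerpt).
 */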
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

2278 static void bnx2x_link_set(struct bnx2x *bp)
2280 if (!BP_NOMCP(bp)) {
2281 bnx2x_acquire_phy_lock(bp);
2282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2283 bnx2x_release_phy_lock(bp);
2285 bnx2x_calc_fc_adv(bp);
2287 BNX2X_ERR("Bootcode is missing - can not set link\n");
2290 static void bnx2x__link_reset(struct bnx2x *bp)
2292 if (!BP_NOMCP(bp)) {
2293 bnx2x_acquire_phy_lock(bp);
2294 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2295 bnx2x_release_phy_lock(bp);
2297 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2300 static u8 bnx2x_link_test(struct bnx2x *bp)
2304 if (!BP_NOMCP(bp)) {
2305 bnx2x_acquire_phy_lock(bp);
2306 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307 bnx2x_release_phy_lock(bp);
2309 BNX2X_ERR("Bootcode is missing - can not test link\n");
2314 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2316 u32 r_param = bp->link_vars.line_speed / 8;
2317 u32 fair_periodic_timeout_usec;
2320 memset(&(bp->cmng.rs_vars), 0,
2321 sizeof(struct rate_shaping_vars_per_port));
2322 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2324 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2327 /* this is the threshold below which no timer arming will occur;
2328 the 1.25 coefficient makes the threshold a little bigger
2329 than the real time, to compensate for timer inaccuracy */
2330 bp->cmng.rs_vars.rs_threshold =
2331 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2333 /* resolution of fairness timer */
2334 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2338 /* this is the threshold below which we won't arm the timer anymore */
2339 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2341 /* we multiply by 1e3/8 to get bytes/msec.
2342 We don't want the credits to exceed
2343 t_fair*FAIR_MEM (the algorithm resolution) */
2344 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345 /* since each tick is 4 usec */
2346 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
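/*
 * Worked example (illustrative) of the arithmetic above at 10 Gbps:
 * r_param = 10000 / 8 = 1250 bytes/usec, so the rate-shaping threshold
 * becomes RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4 = 156250 bytes for a
 * 100 usec period, the programmed timer is 100 / 4 = 25 SDM ticks, and,
 * per the comment in the body, t_fair works out to 1000 usec (10000
 * usec at 1 Gbps).
 */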
2349 /* Calculates the sum of vn_min_rates.
2350 It's needed for further normalizing of the min_rates.
2352 sum of vn_min_rates, or
2354 0 - if all the min_rates are 0.
2355 In the latter case the fairness algorithm should be deactivated.
2356 If not all min_rates are zero then those that are zeroes will be set to 1.
2358 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2361 int port = BP_PORT(bp);
2364 bp->vn_weight_sum = 0;
2365 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366 int func = 2*vn + port;
2367 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2375 /* If min rate is zero - set it to 1 */
2377 vn_min_rate = DEF_MIN_RATE;
2381 bp->vn_weight_sum += vn_min_rate;
2384 /* ... only if all min rates are zeros - disable fairness */
2386 bp->cmng.flags.cmng_enables &=
2387 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2389 " fairness will be disabled\n");
2391 bp->cmng.flags.cmng_enables |=
2392 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
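/*
 * Minimal sketch (illustrative only) of the weighting rule implemented
 * above: zero min-rates are counted as DEF_MIN_RATE so every visible vn
 * keeps some weight, while a separate flag remembers whether fairness
 * should be switched off because nobody configured a min rate.
 */
static u32 vn_weight_sum_sketch(const u32 *min_rate, int nvn,
				u32 def_min, bool *fairness_on)
{
	u32 sum = 0;
	bool all_zero = true;
	int vn;

	for (vn = 0; vn < nvn; vn++) {
		u32 rate = min_rate[vn];

		if (rate)
			all_zero = false;
		else
			rate = def_min;	/* zeroes still get a share */
		sum += rate;
	}
	*fairness_on = !all_zero;
	return sum;
}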
2395 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2397 struct rate_shaping_vars_per_vn m_rs_vn;
2398 struct fairness_vars_per_vn m_fair_vn;
2399 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400 u16 vn_min_rate, vn_max_rate;
2403 /* If function is hidden - set min and max to zeroes */
2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2409 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2411 /* If min rate is zero - set it to 1 */
2413 vn_min_rate = DEF_MIN_RATE;
2414 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2418 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2419 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2421 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2424 /* global vn counter - maximal Mbps for this vn */
2425 m_rs_vn.vn_counter.rate = vn_max_rate;
2427 /* quota - number of bytes transmitted in this period */
2428 m_rs_vn.vn_counter.quota =
2429 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2431 if (bp->vn_weight_sum) {
2432 /* credit for each period of the fairness algorithm:
2433 number of bytes in T_FAIR (the vns share the port rate).
2434 vn_weight_sum should not be larger than 10000, thus
2435 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2437 m_fair_vn.vn_credit_delta =
2438 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439 (8 * bp->vn_weight_sum))),
2440 (bp->cmng.fair_vars.fair_threshold * 2));
2441 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2442 m_fair_vn.vn_credit_delta);
2445 /* Store it to internal memory */
2446 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449 ((u32 *)(&m_rs_vn))[i]);
2451 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_fair_vn))[i]);
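/*
 * Illustrative sketch of the word-copy idiom used twice above: a
 * firmware-visible structure is pushed into storm internal memory one
 * 32-bit word at a time.  reg_wr32() is a stand-in for REG_WR().
 */
static void write_struct_words(void (*reg_wr32)(u32 addr, u32 val),
			       u32 base, const void *obj, size_t size)
{
	const u32 *words = obj;
	size_t i;

	for (i = 0; i < size / 4; i++)
		reg_wr32(base + i * 4, words[i]);
}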
2458 /* This function is called upon link interrupt */
2459 static void bnx2x_link_attn(struct bnx2x *bp)
2461 u32 prev_link_status = bp->link_vars.link_status;
2462 /* Make sure that we are synced with the current statistics */
2463 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2465 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2467 if (bp->link_vars.link_up) {
2469 /* dropless flow control */
2470 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2471 int port = BP_PORT(bp);
2472 u32 pause_enabled = 0;
2474 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2477 REG_WR(bp, BAR_USTRORM_INTMEM +
2478 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2482 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2483 struct host_port_stats *pstats;
2485 pstats = bnx2x_sp(bp, port_stats);
2486 /* reset old bmac stats */
2487 memset(&(pstats->mac_stx[0]), 0,
2488 sizeof(struct mac_stx));
2490 if (bp->state == BNX2X_STATE_OPEN)
2491 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2494 /* indicate link status only if link status actually changed */
2495 if (prev_link_status != bp->link_vars.link_status)
2496 bnx2x_link_report(bp);
2499 int port = BP_PORT(bp);
2503 /* Set the attention towards other drivers on the same port */
2504 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2505 if (vn == BP_E1HVN(bp))
2508 func = ((vn << 1) | port);
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2510 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2513 if (bp->link_vars.link_up) {
2516 /* Init rate shaping and fairness contexts */
2517 bnx2x_init_port_minmax(bp);
2519 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2520 bnx2x_init_vn_minmax(bp, 2*vn + port);
2522 /* Store it to internal memory */
2524 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2525 REG_WR(bp, BAR_XSTRORM_INTMEM +
2526 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2527 ((u32 *)(&bp->cmng))[i]);
2532 static void bnx2x__link_status_update(struct bnx2x *bp)
2534 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2537 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2539 if (bp->link_vars.link_up)
2540 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2542 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2544 bnx2x_calc_vn_weight_sum(bp);
2546 /* indicate link status */
2547 bnx2x_link_report(bp);
2550 static void bnx2x_pmf_update(struct bnx2x *bp)
2552 int port = BP_PORT(bp);
2556 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2558 /* enable nig attention */
2559 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2560 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2561 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2563 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2571 * General service functions
2574 /* send the MCP a request, block until there is a reply */
2575 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2577 int func = BP_FUNC(bp);
2578 u32 seq = ++bp->fw_seq;
2581 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2583 mutex_lock(&bp->fw_mb_mutex);
2584 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2585 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2588 /* let the FW do its magic ... */
2591 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2593 /* Give the FW up to 5 seconds (500*10ms) */
2594 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2596 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2597 cnt*delay, rc, seq);
2599 /* is this a reply to our command? */
2600 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2601 rc &= FW_MSG_CODE_MASK;
2604 BNX2X_ERR("FW failed to respond!\n");
2608 mutex_unlock(&bp->fw_mb_mutex);
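/*
 * Sketch (illustrative only) of the mailbox handshake above: the driver
 * sequence number is or-ed into the command word, and a reply counts
 * only once the firmware echoes that sequence back.  The masks are
 * parameters here rather than the real FW_MSG_* constants.
 */
static u32 mb_handshake_sketch(u32 seq, u32 seq_mask, u32 code_mask,
			       u32 (*poll_reply)(void))
{
	u32 reply;
	int cnt = 0;

	do {	/* bounded poll, mirroring the 500-iteration budget */
		reply = poll_reply();
	} while ((reply & seq_mask) != seq && cnt++ < 500);

	return ((reply & seq_mask) == seq) ? (reply & code_mask) : 0;
}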
2613 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2614 static void bnx2x_set_rx_mode(struct net_device *dev);
2616 static void bnx2x_e1h_disable(struct bnx2x *bp)
2618 int port = BP_PORT(bp);
2620 netif_tx_disable(bp->dev);
2622 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2624 netif_carrier_off(bp->dev);
2627 static void bnx2x_e1h_enable(struct bnx2x *bp)
2629 int port = BP_PORT(bp);
2631 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2633 /* Tx queues should only be re-enabled */
2634 netif_tx_wake_all_queues(bp->dev);
2637 * Should not call netif_carrier_on since it will be called if the link
2638 * is up while checking for the link state
2642 static void bnx2x_update_min_max(struct bnx2x *bp)
2644 int port = BP_PORT(bp);
2647 /* Init rate shaping and fairness contexts */
2648 bnx2x_init_port_minmax(bp);
2650 bnx2x_calc_vn_weight_sum(bp);
2652 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2653 bnx2x_init_vn_minmax(bp, 2*vn + port);
2658 /* Set the attention towards other drivers on the same port */
2659 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2660 if (vn == BP_E1HVN(bp))
2663 func = ((vn << 1) | port);
2664 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2665 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2668 /* Store it to internal memory */
2669 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2670 REG_WR(bp, BAR_XSTRORM_INTMEM +
2671 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2672 ((u32 *)(&bp->cmng))[i]);
2676 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2678 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2680 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2683 * This is the only place besides the function initialization
2684 * where the bp->flags can change so it is done without any
2685 * locks
2686 */
2687 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2688 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2689 bp->flags |= MF_FUNC_DIS;
2691 bnx2x_e1h_disable(bp);
2693 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2694 bp->flags &= ~MF_FUNC_DIS;
2696 bnx2x_e1h_enable(bp);
2698 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2700 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2702 bnx2x_update_min_max(bp);
2703 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2706 /* Report results to MCP */
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2710 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2713 /* must be called under the spq lock */
2714 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2716 struct eth_spe *next_spe = bp->spq_prod_bd;
2718 if (bp->spq_prod_bd == bp->spq_last_bd) {
2719 bp->spq_prod_bd = bp->spq;
2720 bp->spq_prod_idx = 0;
2721 DP(NETIF_MSG_TIMER, "end of spq\n");
2729 /* must be called under the spq lock */
2730 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2732 int func = BP_FUNC(bp);
2734 /* Make sure that BD data is updated before writing the producer */
2737 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2742 /* the slow path queue is odd since completions arrive on the fastpath ring */
2743 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2744 u32 data_hi, u32 data_lo, int common)
2746 struct eth_spe *spe;
2748 #ifdef BNX2X_STOP_ON_ERROR
2749 if (unlikely(bp->panic))
2753 spin_lock_bh(&bp->spq_lock);
2755 if (!bp->spq_left) {
2756 BNX2X_ERR("BUG! SPQ ring full!\n");
2757 spin_unlock_bh(&bp->spq_lock);
2762 spe = bnx2x_sp_get_next(bp);
2764 /* CID needs the port number to be encoded in it */
2765 spe->hdr.conn_and_cmd_data =
2766 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2768 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2771 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2773 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2774 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2778 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2779 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2780 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2781 (u32)(U64_LO(bp->spq_mapping) +
2782 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2783 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2785 bnx2x_sp_prod_update(bp);
2786 spin_unlock_bh(&bp->spq_lock);
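/*
 * Illustrative sketch of the producer handling in bnx2x_sp_get_next():
 * the slow-path queue is a flat BD array, so advancing the producer is
 * just a post-increment that wraps back to the base at the last entry.
 */
struct spq_sketch {
	struct eth_spe *base, *last, *prod_bd;
	u16 prod_idx;
};

static struct eth_spe *spq_next_sketch(struct spq_sketch *q)
{
	struct eth_spe *spe = q->prod_bd;

	if (q->prod_bd == q->last) {	/* wrap around */
		q->prod_bd = q->base;
		q->prod_idx = 0;
	} else {
		q->prod_bd++;
		q->prod_idx++;
	}
	return spe;
}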
2790 /* acquire split MCP access lock register */
2791 static int bnx2x_acquire_alr(struct bnx2x *bp)
2797 for (j = 0; j < 1000; j++) {
2799 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2800 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2801 if (val & (1L << 31))
2806 if (!(val & (1L << 31))) {
2807 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2814 /* release split MCP access lock register */
2815 static void bnx2x_release_alr(struct bnx2x *bp)
2817 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
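/*
 * Sketch of the split-lock idiom used by bnx2x_acquire_alr() and
 * bnx2x_release_alr() above (illustrative; rd/wr stand in for
 * REG_RD/REG_WR): write the lock bit, then read back until the
 * hardware confirms ownership, giving up after a bounded retry count.
 */
static bool hw_bitlock_acquire(u32 (*rd)(u32 addr),
			       void (*wr)(u32 addr, u32 val),
			       u32 addr, u32 bit, int tries)
{
	int j;

	for (j = 0; j < tries; j++) {
		wr(addr, bit);
		if (rd(addr) & bit)
			return true;	/* lock granted */
		/* the real code sleeps ~5ms between attempts */
	}
	return false;
}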
2820 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2822 struct host_def_status_block *def_sb = bp->def_status_blk;
2825 barrier(); /* status block is written to by the chip */
2826 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2827 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2830 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2831 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2834 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2835 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2838 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2839 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2842 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2843 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2850 * slow path service functions
2853 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2855 int port = BP_PORT(bp);
2856 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2857 COMMAND_REG_ATTN_BITS_SET);
2858 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2859 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2860 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2861 NIG_REG_MASK_INTERRUPT_PORT0;
2865 if (bp->attn_state & asserted)
2866 BNX2X_ERR("IGU ERROR\n");
2868 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2869 aeu_mask = REG_RD(bp, aeu_addr);
2871 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2872 aeu_mask, asserted);
2873 aeu_mask &= ~(asserted & 0x3ff);
2874 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2876 REG_WR(bp, aeu_addr, aeu_mask);
2877 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2879 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2880 bp->attn_state |= asserted;
2881 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2883 if (asserted & ATTN_HARD_WIRED_MASK) {
2884 if (asserted & ATTN_NIG_FOR_FUNC) {
2886 bnx2x_acquire_phy_lock(bp);
2888 /* save nig interrupt mask */
2889 nig_mask = REG_RD(bp, nig_int_mask_addr);
2890 REG_WR(bp, nig_int_mask_addr, 0);
2892 bnx2x_link_attn(bp);
2894 /* handle unicore attn? */
2896 if (asserted & ATTN_SW_TIMER_4_FUNC)
2897 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2899 if (asserted & GPIO_2_FUNC)
2900 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2902 if (asserted & GPIO_3_FUNC)
2903 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2905 if (asserted & GPIO_4_FUNC)
2906 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2909 if (asserted & ATTN_GENERAL_ATTN_1) {
2910 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2911 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2913 if (asserted & ATTN_GENERAL_ATTN_2) {
2914 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2915 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2917 if (asserted & ATTN_GENERAL_ATTN_3) {
2918 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2919 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2922 if (asserted & ATTN_GENERAL_ATTN_4) {
2923 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2924 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2926 if (asserted & ATTN_GENERAL_ATTN_5) {
2927 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2928 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2930 if (asserted & ATTN_GENERAL_ATTN_6) {
2931 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2932 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2936 } /* if hardwired */
2938 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2940 REG_WR(bp, hc_addr, asserted);
2942 /* now set back the mask */
2943 if (asserted & ATTN_NIG_FOR_FUNC) {
2944 REG_WR(bp, nig_int_mask_addr, nig_mask);
2945 bnx2x_release_phy_lock(bp);
2949 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2951 int port = BP_PORT(bp);
2953 /* mark the failure */
2954 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2955 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2956 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2957 bp->link_params.ext_phy_config);
2959 /* log the failure */
2960 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2961 " the driver to shut down the card to prevent permanent"
2962 " damage. Please contact OEM Support for assistance\n");
2965 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2967 int port = BP_PORT(bp);
2969 u32 val, swap_val, swap_override;
2971 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2972 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2974 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2976 val = REG_RD(bp, reg_offset);
2977 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2978 REG_WR(bp, reg_offset, val);
2980 BNX2X_ERR("SPIO5 hw attention\n");
2982 /* Fan failure attention */
2983 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2984 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2985 /* Low power mode is controlled by GPIO 2 */
2986 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2987 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2988 /* The PHY reset is controlled by GPIO 1 */
2989 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2990 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2994 /* The PHY reset is controlled by GPIO 1 */
2995 /* fake the port number to cancel the swap done in
2997 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2998 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2999 port = (swap_val && swap_override) ^ 1;
3000 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3001 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007 bnx2x_fan_failure(bp);
3010 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3011 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3012 bnx2x_acquire_phy_lock(bp);
3013 bnx2x_handle_module_detect_int(&bp->link_params);
3014 bnx2x_release_phy_lock(bp);
3017 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3019 val = REG_RD(bp, reg_offset);
3020 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3021 REG_WR(bp, reg_offset, val);
3023 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3024 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3029 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3033 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3035 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3036 BNX2X_ERR("DB hw attention 0x%x\n", val);
3037 /* DORQ discard attention */
3039 BNX2X_ERR("FATAL error from DORQ\n");
3042 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3044 int port = BP_PORT(bp);
3047 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3048 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3050 val = REG_RD(bp, reg_offset);
3051 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3052 REG_WR(bp, reg_offset, val);
3054 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3055 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3060 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3064 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3066 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3067 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3068 /* CFC error attention */
3070 BNX2X_ERR("FATAL error from CFC\n");
3073 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3075 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3076 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3077 /* RQ_USDMDP_FIFO_OVERFLOW */
3079 BNX2X_ERR("FATAL error from PXP\n");
3082 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3084 int port = BP_PORT(bp);
3087 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3088 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3090 val = REG_RD(bp, reg_offset);
3091 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3092 REG_WR(bp, reg_offset, val);
3094 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3095 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3100 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3104 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3106 if (attn & BNX2X_PMF_LINK_ASSERT) {
3107 int func = BP_FUNC(bp);
3109 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3110 bp->mf_config = SHMEM_RD(bp,
3111 mf_cfg.func_mf_config[func].config);
3112 val = SHMEM_RD(bp, func_mb[func].drv_status);
3113 if (val & DRV_STATUS_DCC_EVENT_MASK)
3115 (val & DRV_STATUS_DCC_EVENT_MASK));
3116 bnx2x__link_status_update(bp);
3117 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3118 bnx2x_pmf_update(bp);
3120 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3122 BNX2X_ERR("MC assert!\n");
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3125 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3126 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3129 } else if (attn & BNX2X_MCP_ASSERT) {
3131 BNX2X_ERR("MCP assert!\n");
3132 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3136 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3139 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3140 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3141 if (attn & BNX2X_GRC_TIMEOUT) {
3142 val = CHIP_IS_E1H(bp) ?
3143 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3144 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3146 if (attn & BNX2X_GRC_RSV) {
3147 val = CHIP_IS_E1H(bp) ?
3148 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3149 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3151 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3155 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3156 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3159 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3160 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3161 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3162 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3163 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3164 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3166 * should be run under rtnl lock
3168 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3170 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3171 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3172 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178 * should be run under rtnl lock
3180 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3182 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3184 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190 * should be run under rtnl lock
3192 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3194 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3195 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3196 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3200 * should be run under rtnl lock
3202 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3204 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3206 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3208 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3209 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215 * should be run under rtnl lock
3217 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3219 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3221 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3223 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3224 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3232 * should be run under rtnl lock
3234 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3236 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3239 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3241 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3242 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
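/*
 * Layout sketch of the scratch register the helpers above manage: the
 * low LOAD_COUNTER_BITS count loaded driver instances, and the bit at
 * RESET_DONE_FLAG_SHIFT is raised while a recovery reset is in flight
 * (bnx2x_reset_is_done() checks that it is clear).  An increment, for
 * illustration:
 */
static u32 load_cnt_inc_sketch(u32 reg)
{
	u32 cnt = ((reg & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;

	return (reg & RESET_DONE_FLAG_MASK) | cnt;
}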
3245 static inline void _print_next_block(int idx, const char *blk)
3252 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3256 for (i = 0; sig; i++) {
3257 cur_bit = ((u32)0x1 << i);
3258 if (sig & cur_bit) {
3260 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3261 _print_next_block(par_num++, "BRB");
3263 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3264 _print_next_block(par_num++, "PARSER");
3266 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3267 _print_next_block(par_num++, "TSDM");
3269 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3270 _print_next_block(par_num++, "SEARCHER");
3272 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3273 _print_next_block(par_num++, "TSEMI");
3285 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3289 for (i = 0; sig; i++) {
3290 cur_bit = ((u32)0x1 << i);
3291 if (sig & cur_bit) {
3293 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3294 _print_next_block(par_num++, "PBCLIENT");
3296 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3297 _print_next_block(par_num++, "QM");
3299 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3300 _print_next_block(par_num++, "XSDM");
3302 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3303 _print_next_block(par_num++, "XSEMI");
3305 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3306 _print_next_block(par_num++, "DOORBELLQ");
3308 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3309 _print_next_block(par_num++, "VAUX PCI CORE");
3311 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3312 _print_next_block(par_num++, "DEBUG");
3314 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3315 _print_next_block(par_num++, "USDM");
3317 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3318 _print_next_block(par_num++, "USEMI");
3320 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3321 _print_next_block(par_num++, "UPB");
3323 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3324 _print_next_block(par_num++, "CSDM");
3336 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3340 for (i = 0; sig; i++) {
3341 cur_bit = ((u32)0x1 << i);
3342 if (sig & cur_bit) {
3344 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3345 _print_next_block(par_num++, "CSEMI");
3347 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3348 _print_next_block(par_num++, "PXP");
3350 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3351 _print_next_block(par_num++,
3352 "PXPPCICLOCKCLIENT");
3354 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3355 _print_next_block(par_num++, "CFC");
3357 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3358 _print_next_block(par_num++, "CDU");
3360 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3361 _print_next_block(par_num++, "IGU");
3363 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3364 _print_next_block(par_num++, "MISC");
3376 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3380 for (i = 0; sig; i++) {
3381 cur_bit = ((u32)0x1 << i);
3382 if (sig & cur_bit) {
3384 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3385 _print_next_block(par_num++, "MCP ROM");
3387 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3388 _print_next_block(par_num++, "MCP UMP RX");
3390 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3391 _print_next_block(par_num++, "MCP UMP TX");
3393 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3394 _print_next_block(par_num++, "MCP SCPAD");
3406 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3409 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3410 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3412 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3413 "[0]:0x%08x [1]:0x%08x "
3414 "[2]:0x%08x [3]:0x%08x\n",
3415 sig0 & HW_PRTY_ASSERT_SET_0,
3416 sig1 & HW_PRTY_ASSERT_SET_1,
3417 sig2 & HW_PRTY_ASSERT_SET_2,
3418 sig3 & HW_PRTY_ASSERT_SET_3);
3419 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3421 par_num = bnx2x_print_blocks_with_parity0(
3422 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3423 par_num = bnx2x_print_blocks_with_parity1(
3424 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3425 par_num = bnx2x_print_blocks_with_parity2(
3426 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3427 par_num = bnx2x_print_blocks_with_parity3(
3428 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3435 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3437 struct attn_route attn;
3438 int port = BP_PORT(bp);
3440 attn.sig[0] = REG_RD(bp,
3441 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3443 attn.sig[1] = REG_RD(bp,
3444 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3446 attn.sig[2] = REG_RD(bp,
3447 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3449 attn.sig[3] = REG_RD(bp,
3450 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3453 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3457 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3459 struct attn_route attn, *group_mask;
3460 int port = BP_PORT(bp);
3466 /* need to take HW lock because MCP or other port might also
3467 try to handle this event */
3468 bnx2x_acquire_alr(bp);
3470 if (bnx2x_chk_parity_attn(bp)) {
3471 bp->recovery_state = BNX2X_RECOVERY_INIT;
3472 bnx2x_set_reset_in_progress(bp);
3473 schedule_delayed_work(&bp->reset_task, 0);
3474 /* Disable HW interrupts */
3475 bnx2x_int_disable(bp);
3476 bnx2x_release_alr(bp);
3477 /* In case of parity errors don't handle attentions so that
3478 * other functions would "see" the parity errors.
3483 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3484 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3485 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3486 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3487 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3488 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3490 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3491 if (deasserted & (1 << index)) {
3492 group_mask = &bp->attn_group[index];
3494 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3495 index, group_mask->sig[0], group_mask->sig[1],
3496 group_mask->sig[2], group_mask->sig[3]);
3498 bnx2x_attn_int_deasserted3(bp,
3499 attn.sig[3] & group_mask->sig[3]);
3500 bnx2x_attn_int_deasserted1(bp,
3501 attn.sig[1] & group_mask->sig[1]);
3502 bnx2x_attn_int_deasserted2(bp,
3503 attn.sig[2] & group_mask->sig[2]);
3504 bnx2x_attn_int_deasserted0(bp,
3505 attn.sig[0] & group_mask->sig[0]);
3509 bnx2x_release_alr(bp);
3511 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3514 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3516 REG_WR(bp, reg_addr, val);
3518 if (~bp->attn_state & deasserted)
3519 BNX2X_ERR("IGU ERROR\n");
3521 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3522 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3524 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3525 aeu_mask = REG_RD(bp, reg_addr);
3527 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3528 aeu_mask, deasserted);
3529 aeu_mask |= (deasserted & 0x3ff);
3530 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3532 REG_WR(bp, reg_addr, aeu_mask);
3533 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3535 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3536 bp->attn_state &= ~deasserted;
3537 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3540 static void bnx2x_attn_int(struct bnx2x *bp)
3542 /* read local copy of bits */
3543 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3545 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3547 u32 attn_state = bp->attn_state;
3549 /* look for changed bits */
3550 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3551 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3554 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3555 attn_bits, attn_ack, asserted, deasserted);
3557 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3558 BNX2X_ERR("BAD attention state\n");
3560 /* handle bits that were raised */
3562 bnx2x_attn_int_asserted(bp, asserted);
3565 bnx2x_attn_int_deasserted(bp, deasserted);
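/*
 * Truth-table sketch of the edge detection above (illustrative): a bit
 * the chip raised but we have neither acked nor recorded is "asserted";
 * a bit the chip dropped while we still hold it acked and recorded is
 * "deasserted".  Everything else is steady state.
 */
static void attn_edges_sketch(u32 bits, u32 ack, u32 state,
			      u32 *asserted, u32 *deasserted)
{
	*asserted = bits & ~ack & ~state;
	*deasserted = ~bits & ack & state;
}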
3568 static void bnx2x_sp_task(struct work_struct *work)
3570 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3573 /* Return here if interrupt is disabled */
3574 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3575 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3579 status = bnx2x_update_dsb_idx(bp);
3580 /* if (status == 0) */
3581 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3583 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3591 /* CStorm events: STAT_QUERY */
3593 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3597 if (unlikely(status))
3598 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3601 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3603 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3605 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3607 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3609 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3613 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3615 struct net_device *dev = dev_instance;
3616 struct bnx2x *bp = netdev_priv(dev);
3618 /* Return here if interrupt is disabled */
3619 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3620 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3624 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3626 #ifdef BNX2X_STOP_ON_ERROR
3627 if (unlikely(bp->panic))
3633 struct cnic_ops *c_ops;
3636 c_ops = rcu_dereference(bp->cnic_ops);
3638 c_ops->cnic_handler(bp->cnic_data, NULL);
3642 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3647 /* end of slow path */
3651 /****************************************************************************
3653 ****************************************************************************/
3655 /* sum[hi:lo] += add[hi:lo] */
3656 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3659 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3662 /* difference = minuend - subtrahend */
3663 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3665 if (m_lo < s_lo) { \
3667 d_hi = m_hi - s_hi; \
3669 /* we can 'borrow' 1 */ \
3671 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3673 /* m_hi <= s_hi */ \
3678 /* m_lo >= s_lo */ \
3679 if (m_hi < s_hi) { \
3683 /* m_hi >= s_hi */ \
3684 d_hi = m_hi - s_hi; \
3685 d_lo = m_lo - s_lo; \
3690 #define UPDATE_STAT64(s, t) \
3692 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3693 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3694 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3695 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3696 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3697 pstats->mac_stx[1].t##_lo, diff.lo); \
3700 #define UPDATE_STAT64_NIG(s, t) \
3702 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3703 diff.lo, new->s##_lo, old->s##_lo); \
3704 ADD_64(estats->t##_hi, diff.hi, \
3705 estats->t##_lo, diff.lo); \
3708 /* sum[hi:lo] += add */
3709 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3712 s_hi += (s_lo < a) ? 1 : 0; \
3715 #define UPDATE_EXTEND_STAT(s) \
3717 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3718 pstats->mac_stx[1].s##_lo, \
3722 #define UPDATE_EXTEND_TSTAT(s, t) \
3724 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3725 old_tclient->s = tclient->s; \
3726 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3729 #define UPDATE_EXTEND_USTAT(s, t) \
3731 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3732 old_uclient->s = uclient->s; \
3733 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3736 #define UPDATE_EXTEND_XSTAT(s, t) \
3738 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3739 old_xclient->s = xclient->s; \
3740 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3743 /* minuend -= subtrahend */
3744 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3746 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3749 /* minuend[hi:lo] -= subtrahend */
3750 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3752 SUB_64(m_hi, 0, m_lo, s); \
3755 #define SUB_EXTEND_USTAT(s, t) \
3757 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3758 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
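/*
 * The macros above keep 64-bit counters as {hi, lo} u32 pairs because
 * that is how the MACs and storms export them.  A plain-function sketch
 * of ADD_64 (a wrap of the low word carries into the high word):
 */
static void add_64_sketch(u32 *s_hi, u32 *s_lo, u32 a_hi, u32 a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* low-word wrap => carry */
}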
3762 * General service functions
3765 static inline long bnx2x_hilo(u32 *hiref)
3767 u32 lo = *(hiref + 1);
3768 #if (BITS_PER_LONG == 64)
3771 return HILO_U64(hi, lo);
3778 * Init service functions
3781 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3783 if (!bp->stats_pending) {
3784 struct eth_query_ramrod_data ramrod_data = {0};
3787 ramrod_data.drv_counter = bp->stats_counter++;
3788 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3789 for_each_queue(bp, i)
3790 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3792 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3793 ((u32 *)&ramrod_data)[1],
3794 ((u32 *)&ramrod_data)[0], 0);
3796 /* stats ramrod has its own slot on the spq */
3798 bp->stats_pending = 1;
3803 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3805 struct dmae_command *dmae = &bp->stats_dmae;
3806 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3808 *stats_comp = DMAE_COMP_VAL;
3809 if (CHIP_REV_IS_SLOW(bp))
3813 if (bp->executer_idx) {
3814 int loader_idx = PMF_DMAE_C(bp);
3816 memset(dmae, 0, sizeof(struct dmae_command));
3818 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3819 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3820 DMAE_CMD_DST_RESET |
3822 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3824 DMAE_CMD_ENDIANITY_DW_SWAP |
3826 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3828 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3830 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3831 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3832 sizeof(struct dmae_command) *
3833 (loader_idx + 1)) >> 2;
3834 dmae->dst_addr_hi = 0;
3835 dmae->len = sizeof(struct dmae_command) >> 2;
3838 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3839 dmae->comp_addr_hi = 0;
3843 bnx2x_post_dmae(bp, dmae, loader_idx);
3845 } else if (bp->func_stx) {
3847 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3851 static int bnx2x_stats_comp(struct bnx2x *bp)
3853 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3857 while (*stats_comp != DMAE_COMP_VAL) {
3859 BNX2X_ERR("timeout waiting for stats finished\n");
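/*
 * Sketch of the completion poll above (illustrative): the DMAE engine
 * writes DMAE_COMP_VAL into a driver-owned word when a copy finishes,
 * so "waiting" is spinning on that word with a bounded retry count.
 */
static bool wait_comp_sketch(volatile u32 *comp, u32 done_val, int tries)
{
	while (*comp != done_val) {
		if (!tries--)
			return false;	/* timed out */
		/* the real code waits ~1ms per iteration */
	}
	return true;
}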
3869 * Statistics service functions
3872 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3874 struct dmae_command *dmae;
3876 int loader_idx = PMF_DMAE_C(bp);
3877 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3880 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3881 BNX2X_ERR("BUG!\n");
3885 bp->executer_idx = 0;
3887 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3889 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3891 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3893 DMAE_CMD_ENDIANITY_DW_SWAP |
3895 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3896 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3898 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3899 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3900 dmae->src_addr_lo = bp->port.port_stx >> 2;
3901 dmae->src_addr_hi = 0;
3902 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3903 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3904 dmae->len = DMAE_LEN32_RD_MAX;
3905 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3906 dmae->comp_addr_hi = 0;
3909 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3910 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3911 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3912 dmae->src_addr_hi = 0;
3913 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3914 DMAE_LEN32_RD_MAX * 4);
3915 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3916 DMAE_LEN32_RD_MAX * 4);
3917 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3918 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3919 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3920 dmae->comp_val = DMAE_COMP_VAL;
3923 bnx2x_hw_stats_post(bp);
3924 bnx2x_stats_comp(bp);
3927 static void bnx2x_port_stats_init(struct bnx2x *bp)
3929 struct dmae_command *dmae;
3930 int port = BP_PORT(bp);
3931 int vn = BP_E1HVN(bp);
3933 int loader_idx = PMF_DMAE_C(bp);
3935 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3938 if (!bp->link_vars.link_up || !bp->port.pmf) {
3939 BNX2X_ERR("BUG!\n");
3943 bp->executer_idx = 0;
3946 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3947 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3948 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3950 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3952 DMAE_CMD_ENDIANITY_DW_SWAP |
3954 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3955 (vn << DMAE_CMD_E1HVN_SHIFT));
3957 if (bp->port.port_stx) {
3959 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3960 dmae->opcode = opcode;
3961 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3962 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3963 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3964 dmae->dst_addr_hi = 0;
3965 dmae->len = sizeof(struct host_port_stats) >> 2;
3966 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3967 dmae->comp_addr_hi = 0;
3973 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3974 dmae->opcode = opcode;
3975 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3976 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3977 dmae->dst_addr_lo = bp->func_stx >> 2;
3978 dmae->dst_addr_hi = 0;
3979 dmae->len = sizeof(struct host_func_stats) >> 2;
3980 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3981 dmae->comp_addr_hi = 0;
3986 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3987 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3988 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3990 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3992 DMAE_CMD_ENDIANITY_DW_SWAP |
3994 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3995 (vn << DMAE_CMD_E1HVN_SHIFT));
3997 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3999 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4000 NIG_REG_INGRESS_BMAC0_MEM);
4002 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4003 BIGMAC_REGISTER_TX_STAT_GTBYT */
4004 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4005 dmae->opcode = opcode;
4006 dmae->src_addr_lo = (mac_addr +
4007 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4008 dmae->src_addr_hi = 0;
4009 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4010 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4011 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4012 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4014 dmae->comp_addr_hi = 0;
4017 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4018 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4019 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4020 dmae->opcode = opcode;
4021 dmae->src_addr_lo = (mac_addr +
4022 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4023 dmae->src_addr_hi = 0;
4024 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4025 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4026 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4027 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4028 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4029 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4030 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4031 dmae->comp_addr_hi = 0;
4034 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4036 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4038 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4039 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4040 dmae->opcode = opcode;
4041 dmae->src_addr_lo = (mac_addr +
4042 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4043 dmae->src_addr_hi = 0;
4044 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4045 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4046 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4047 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4048 dmae->comp_addr_hi = 0;
4051 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4052 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4053 dmae->opcode = opcode;
4054 dmae->src_addr_lo = (mac_addr +
4055 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4056 dmae->src_addr_hi = 0;
4057 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4058 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4059 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4060 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4062 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4063 dmae->comp_addr_hi = 0;
4066 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4067 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 dmae->opcode = opcode;
4069 dmae->src_addr_lo = (mac_addr +
4070 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4071 dmae->src_addr_hi = 0;
4072 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4073 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4074 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4075 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4076 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4077 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078 dmae->comp_addr_hi = 0;
4083 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4084 dmae->opcode = opcode;
4085 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4086 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4087 dmae->src_addr_hi = 0;
4088 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4089 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4090 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4091 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4092 dmae->comp_addr_hi = 0;
4095 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4096 dmae->opcode = opcode;
4097 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4098 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4099 dmae->src_addr_hi = 0;
4100 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4101 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4103 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4104 dmae->len = (2*sizeof(u32)) >> 2;
4105 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4106 dmae->comp_addr_hi = 0;
4109 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4110 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4111 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4112 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4114 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4116 DMAE_CMD_ENDIANITY_DW_SWAP |
4118 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4119 (vn << DMAE_CMD_E1HVN_SHIFT));
4120 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4121 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4122 dmae->src_addr_hi = 0;
4123 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4124 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4126 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4127 dmae->len = (2*sizeof(u32)) >> 2;
4128 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4129 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4130 dmae->comp_val = DMAE_COMP_VAL;
4135 static void bnx2x_func_stats_init(struct bnx2x *bp)
4137 struct dmae_command *dmae = &bp->stats_dmae;
4138 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4141 if (!bp->func_stx) {
4142 BNX2X_ERR("BUG!\n");
4146 bp->executer_idx = 0;
4147 memset(dmae, 0, sizeof(struct dmae_command));
4149 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4150 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4151 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4153 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4155 DMAE_CMD_ENDIANITY_DW_SWAP |
4157 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4158 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4159 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4160 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4161 dmae->dst_addr_lo = bp->func_stx >> 2;
4162 dmae->dst_addr_hi = 0;
4163 dmae->len = sizeof(struct host_func_stats) >> 2;
4164 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4165 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4166 dmae->comp_val = DMAE_COMP_VAL;
4171 static void bnx2x_stats_start(struct bnx2x *bp)
4174 bnx2x_port_stats_init(bp);
4176 else if (bp->func_stx)
4177 bnx2x_func_stats_init(bp);
4179 bnx2x_hw_stats_post(bp);
4180 bnx2x_storm_stats_post(bp);
4183 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4185 bnx2x_stats_comp(bp);
4186 bnx2x_stats_pmf_update(bp);
4187 bnx2x_stats_start(bp);
4190 static void bnx2x_stats_restart(struct bnx2x *bp)
4192 bnx2x_stats_comp(bp);
4193 bnx2x_stats_start(bp);
4196 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4198 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4199 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4200 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4206 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4207 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4208 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4209 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4210 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4211 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4212 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4213 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4214 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4215 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4216 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4217 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4218 UPDATE_STAT64(tx_stat_gt127,
4219 tx_stat_etherstatspkts65octetsto127octets);
4220 UPDATE_STAT64(tx_stat_gt255,
4221 tx_stat_etherstatspkts128octetsto255octets);
4222 UPDATE_STAT64(tx_stat_gt511,
4223 tx_stat_etherstatspkts256octetsto511octets);
4224 UPDATE_STAT64(tx_stat_gt1023,
4225 tx_stat_etherstatspkts512octetsto1023octets);
4226 UPDATE_STAT64(tx_stat_gt1518,
4227 tx_stat_etherstatspkts1024octetsto1522octets);
4228 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4229 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4230 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4231 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4232 UPDATE_STAT64(tx_stat_gterr,
4233 tx_stat_dot3statsinternalmactransmiterrors);
4234 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4236 estats->pause_frames_received_hi =
4237 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4238 estats->pause_frames_received_lo =
4239 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4241 estats->pause_frames_sent_hi =
4242 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4243 estats->pause_frames_sent_lo =
4244 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4247 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4249 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4250 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4251 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4253 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4254 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4255 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4256 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4257 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4258 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4259 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4260 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4261 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4262 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4263 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4264 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4265 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4266 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4267 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4268 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4269 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4270 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4271 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4272 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4273 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4274 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4275 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4276 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4277 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4278 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4280 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4281 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4282 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4283 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4285 estats->pause_frames_received_hi =
4286 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4287 estats->pause_frames_received_lo =
4288 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4289 ADD_64(estats->pause_frames_received_hi,
4290 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4291 estats->pause_frames_received_lo,
4292 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4294 estats->pause_frames_sent_hi =
4295 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4296 estats->pause_frames_sent_lo =
4297 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4298 ADD_64(estats->pause_frames_sent_hi,
4299 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4300 estats->pause_frames_sent_lo,
4301 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4304 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4306 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4307 struct nig_stats *old = &(bp->port.old_nig_stats);
4308 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4309 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4315 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4316 bnx2x_bmac_stats_update(bp);
4318 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4319 bnx2x_emac_stats_update(bp);
4321 else { /* unreached */
4322 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4326 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4327 new->brb_discard - old->brb_discard);
4328 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4329 new->brb_truncate - old->brb_truncate);
4331 UPDATE_STAT64_NIG(egress_mac_pkt0,
4332 etherstatspkts1024octetsto1522octets);
4333 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4335 memcpy(old, new, sizeof(struct nig_stats));
4337 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4338 sizeof(struct mac_stx));
4339 estats->brb_drop_hi = pstats->brb_drop_hi;
4340 estats->brb_drop_lo = pstats->brb_drop_lo;
4342 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4344 if (!BP_NOMCP(bp)) {
4346 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4347 if (nig_timer_max != estats->nig_timer_max) {
4348 estats->nig_timer_max = nig_timer_max;
4349 BNX2X_ERR("NIG timer max (%u)\n",
4350 estats->nig_timer_max);
4357 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4359 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4360 struct tstorm_per_port_stats *tport =
4361 &stats->tstorm_common.port_statistics;
4362 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4363 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4366 memcpy(&(fstats->total_bytes_received_hi),
4367 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4368 sizeof(struct host_func_stats) - 2*sizeof(u32));
4369 estats->error_bytes_received_hi = 0;
4370 estats->error_bytes_received_lo = 0;
4371 estats->etherstatsoverrsizepkts_hi = 0;
4372 estats->etherstatsoverrsizepkts_lo = 0;
4373 estats->no_buff_discard_hi = 0;
4374 estats->no_buff_discard_lo = 0;
4376 for_each_queue(bp, i) {
4377 struct bnx2x_fastpath *fp = &bp->fp[i];
4378 int cl_id = fp->cl_id;
4379 struct tstorm_per_client_stats *tclient =
4380 &stats->tstorm_common.client_statistics[cl_id];
4381 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4382 struct ustorm_per_client_stats *uclient =
4383 &stats->ustorm_common.client_statistics[cl_id];
4384 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4385 struct xstorm_per_client_stats *xclient =
4386 &stats->xstorm_common.client_statistics[cl_id];
4387 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4388 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4391 /* are storm stats valid? */
4392 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4393 bp->stats_counter) {
4394 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4395 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4396 i, xclient->stats_counter, bp->stats_counter);
4397 return -1;
4398 }
4399 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4400 bp->stats_counter) {
4401 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4402 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4403 i, tclient->stats_counter, bp->stats_counter);
4404 return -1;
4405 }
4406 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4407 bp->stats_counter) {
4408 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4409 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4410 i, uclient->stats_counter, bp->stats_counter);
4411 return -1;
4412 }
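/*
 * The three checks above are the statistics handshake: the driver
 * advances bp->stats_counter whenever it posts a statistics ramrod and
 * each storm echoes back the counter it last served, so a reply is
 * current only when echo + 1 equals the driver counter (u16 wraparound
 * intended). Illustrative predicate:
 */
#if 0
static inline int storm_stats_current(u16 echoed, u16 driver_counter)
{
return (u16)(echoed + 1) == driver_counter;
}
#endif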
4414 qstats->total_bytes_received_hi =
4415 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4416 qstats->total_bytes_received_lo =
4417 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4419 ADD_64(qstats->total_bytes_received_hi,
4420 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4421 qstats->total_bytes_received_lo,
4422 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4424 ADD_64(qstats->total_bytes_received_hi,
4425 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4426 qstats->total_bytes_received_lo,
4427 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4429 SUB_64(qstats->total_bytes_received_hi,
4430 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4431 qstats->total_bytes_received_lo,
4432 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4434 SUB_64(qstats->total_bytes_received_hi,
4435 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4436 qstats->total_bytes_received_lo,
4437 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4439 SUB_64(qstats->total_bytes_received_hi,
4440 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4441 qstats->total_bytes_received_lo,
4442 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4444 qstats->valid_bytes_received_hi =
4445 qstats->total_bytes_received_hi;
4446 qstats->valid_bytes_received_lo =
4447 qstats->total_bytes_received_lo;
4449 qstats->error_bytes_received_hi =
4450 le32_to_cpu(tclient->rcv_error_bytes.hi);
4451 qstats->error_bytes_received_lo =
4452 le32_to_cpu(tclient->rcv_error_bytes.lo);
4454 ADD_64(qstats->total_bytes_received_hi,
4455 qstats->error_bytes_received_hi,
4456 qstats->total_bytes_received_lo,
4457 qstats->error_bytes_received_lo);
4459 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4460 total_unicast_packets_received);
4461 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4462 total_multicast_packets_received);
4463 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4464 total_broadcast_packets_received);
4465 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4466 etherstatsoverrsizepkts);
4467 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4469 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4470 total_unicast_packets_received);
4471 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4472 total_multicast_packets_received);
4473 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4474 total_broadcast_packets_received);
4475 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4476 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4477 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
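/*
 * The UPDATE_EXTEND_*STAT helpers used above widen wrapping 32-bit
 * firmware counters into the driver's split 64-bit ones by accumulating
 * the delta against the last value seen (old_tclient/old_uclient).
 * Sketch of the idea, ignoring the _hi/_lo split the real macros also
 * handle:
 */
#if 0
static inline void extend_stat_sketch(u64 *total, u32 *last, u32 now)
{
u32 diff = now - *last; /* modulo-2^32 delta survives wraparound */

*last = now;
*total += diff;
}
#endif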
4479 qstats->total_bytes_transmitted_hi =
4480 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4481 qstats->total_bytes_transmitted_lo =
4482 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4484 ADD_64(qstats->total_bytes_transmitted_hi,
4485 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4486 qstats->total_bytes_transmitted_lo,
4487 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4489 ADD_64(qstats->total_bytes_transmitted_hi,
4490 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4491 qstats->total_bytes_transmitted_lo,
4492 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4494 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4495 total_unicast_packets_transmitted);
4496 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4497 total_multicast_packets_transmitted);
4498 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4499 total_broadcast_packets_transmitted);
4501 old_tclient->checksum_discard = tclient->checksum_discard;
4502 old_tclient->ttl0_discard = tclient->ttl0_discard;
4504 ADD_64(fstats->total_bytes_received_hi,
4505 qstats->total_bytes_received_hi,
4506 fstats->total_bytes_received_lo,
4507 qstats->total_bytes_received_lo);
4508 ADD_64(fstats->total_bytes_transmitted_hi,
4509 qstats->total_bytes_transmitted_hi,
4510 fstats->total_bytes_transmitted_lo,
4511 qstats->total_bytes_transmitted_lo);
4512 ADD_64(fstats->total_unicast_packets_received_hi,
4513 qstats->total_unicast_packets_received_hi,
4514 fstats->total_unicast_packets_received_lo,
4515 qstats->total_unicast_packets_received_lo);
4516 ADD_64(fstats->total_multicast_packets_received_hi,
4517 qstats->total_multicast_packets_received_hi,
4518 fstats->total_multicast_packets_received_lo,
4519 qstats->total_multicast_packets_received_lo);
4520 ADD_64(fstats->total_broadcast_packets_received_hi,
4521 qstats->total_broadcast_packets_received_hi,
4522 fstats->total_broadcast_packets_received_lo,
4523 qstats->total_broadcast_packets_received_lo);
4524 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4525 qstats->total_unicast_packets_transmitted_hi,
4526 fstats->total_unicast_packets_transmitted_lo,
4527 qstats->total_unicast_packets_transmitted_lo);
4528 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4529 qstats->total_multicast_packets_transmitted_hi,
4530 fstats->total_multicast_packets_transmitted_lo,
4531 qstats->total_multicast_packets_transmitted_lo);
4532 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4533 qstats->total_broadcast_packets_transmitted_hi,
4534 fstats->total_broadcast_packets_transmitted_lo,
4535 qstats->total_broadcast_packets_transmitted_lo);
4536 ADD_64(fstats->valid_bytes_received_hi,
4537 qstats->valid_bytes_received_hi,
4538 fstats->valid_bytes_received_lo,
4539 qstats->valid_bytes_received_lo);
4541 ADD_64(estats->error_bytes_received_hi,
4542 qstats->error_bytes_received_hi,
4543 estats->error_bytes_received_lo,
4544 qstats->error_bytes_received_lo);
4545 ADD_64(estats->etherstatsoverrsizepkts_hi,
4546 qstats->etherstatsoverrsizepkts_hi,
4547 estats->etherstatsoverrsizepkts_lo,
4548 qstats->etherstatsoverrsizepkts_lo);
4549 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4550 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4553 ADD_64(fstats->total_bytes_received_hi,
4554 estats->rx_stat_ifhcinbadoctets_hi,
4555 fstats->total_bytes_received_lo,
4556 estats->rx_stat_ifhcinbadoctets_lo);
4558 memcpy(estats, &(fstats->total_bytes_received_hi),
4559 sizeof(struct host_func_stats) - 2*sizeof(u32));
4561 ADD_64(estats->etherstatsoverrsizepkts_hi,
4562 estats->rx_stat_dot3statsframestoolong_hi,
4563 estats->etherstatsoverrsizepkts_lo,
4564 estats->rx_stat_dot3statsframestoolong_lo);
4565 ADD_64(estats->error_bytes_received_hi,
4566 estats->rx_stat_ifhcinbadoctets_hi,
4567 estats->error_bytes_received_lo,
4568 estats->rx_stat_ifhcinbadoctets_lo);
4571 estats->mac_filter_discard =
4572 le32_to_cpu(tport->mac_filter_discard);
4573 estats->xxoverflow_discard =
4574 le32_to_cpu(tport->xxoverflow_discard);
4575 estats->brb_truncate_discard =
4576 le32_to_cpu(tport->brb_truncate_discard);
4577 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4580 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4582 bp->stats_pending = 0;
4587 static void bnx2x_net_stats_update(struct bnx2x *bp)
4589 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4590 struct net_device_stats *nstats = &bp->dev->stats;
4593 nstats->rx_packets =
4594 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4595 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4596 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4598 nstats->tx_packets =
4599 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4600 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4601 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4603 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4605 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4607 nstats->rx_dropped = estats->mac_discard;
4608 for_each_queue(bp, i)
4609 nstats->rx_dropped +=
4610 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4612 nstats->tx_dropped = 0;
4614 nstats->multicast =
4615 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4617 nstats->collisions =
4618 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4620 nstats->rx_length_errors =
4621 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4622 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4623 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4624 bnx2x_hilo(&estats->brb_truncate_hi);
4625 nstats->rx_crc_errors =
4626 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4627 nstats->rx_frame_errors =
4628 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4629 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4630 nstats->rx_missed_errors = estats->xxoverflow_discard;
4632 nstats->rx_errors = nstats->rx_length_errors +
4633 nstats->rx_over_errors +
4634 nstats->rx_crc_errors +
4635 nstats->rx_frame_errors +
4636 nstats->rx_fifo_errors +
4637 nstats->rx_missed_errors;
4639 nstats->tx_aborted_errors =
4640 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4641 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4642 nstats->tx_carrier_errors =
4643 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4644 nstats->tx_fifo_errors = 0;
4645 nstats->tx_heartbeat_errors = 0;
4646 nstats->tx_window_errors = 0;
4648 nstats->tx_errors = nstats->tx_aborted_errors +
4649 nstats->tx_carrier_errors +
4650 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
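/*
 * bnx2x_hilo() (defined elsewhere in this file) folds a split counter
 * into the unsigned long the net core expects; callers pass &..._hi
 * because the _lo word is laid out immediately after it. A sketch of
 * that contract:
 */
#if 0
static inline unsigned long hilo_sketch(u32 *hiref)
{
u32 lo = *(hiref + 1); /* _lo follows _hi in the stats structs */
#if (BITS_PER_LONG == 64)
return (((u64)*hiref) << 32) + lo;
#else
return lo; /* 32-bit arches keep only the low word */
#endif
}
#endif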
4653 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4655 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4658 estats->driver_xoff = 0;
4659 estats->rx_err_discard_pkt = 0;
4660 estats->rx_skb_alloc_failed = 0;
4661 estats->hw_csum_err = 0;
4662 for_each_queue(bp, i) {
4663 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4665 estats->driver_xoff += qstats->driver_xoff;
4666 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4667 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4668 estats->hw_csum_err += qstats->hw_csum_err;
4672 static void bnx2x_stats_update(struct bnx2x *bp)
4674 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4676 if (*stats_comp != DMAE_COMP_VAL)
4677 return;
4679 if (bp->executer_idx)
4680 bnx2x_hw_stats_update(bp);
4682 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4683 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4684 bnx2x_panic();
4685 return;
4686 }
4688 bnx2x_net_stats_update(bp);
4689 bnx2x_drv_stats_update(bp);
4691 if (netif_msg_timer(bp)) {
4692 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4695 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4696 bp->dev->name,
4697 estats->brb_drop_lo, estats->brb_truncate_lo);
4699 for_each_queue(bp, i) {
4700 struct bnx2x_fastpath *fp = &bp->fp[i];
4701 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4703 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4704 " rx pkt(%lu) rx calls(%lu %lu)\n",
4705 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4706 fp->rx_comp_cons),
4707 le16_to_cpu(*fp->rx_cons_sb),
4708 bnx2x_hilo(&qstats->
4709 total_unicast_packets_received_hi),
4710 fp->rx_calls, fp->rx_pkt);
4713 for_each_queue(bp, i) {
4714 struct bnx2x_fastpath *fp = &bp->fp[i];
4715 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4716 struct netdev_queue *txq =
4717 netdev_get_tx_queue(bp->dev, i);
4719 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4720 " tx pkt(%lu) tx calls (%lu)"
4721 " %s (Xoff events %u)\n",
4722 fp->name, bnx2x_tx_avail(fp),
4723 le16_to_cpu(*fp->tx_cons_sb),
4724 bnx2x_hilo(&qstats->
4725 total_unicast_packets_transmitted_hi),
4726 fp->tx_pkt,
4727 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4728 qstats->driver_xoff);
4732 bnx2x_hw_stats_post(bp);
4733 bnx2x_storm_stats_post(bp);
4736 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4738 struct dmae_command *dmae;
4739 u32 opcode;
4740 int loader_idx = PMF_DMAE_C(bp);
4741 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4743 bp->executer_idx = 0;
4745 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4747 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4748 #ifdef __BIG_ENDIAN
4749 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4750 #else
4751 DMAE_CMD_ENDIANITY_DW_SWAP |
4752 #endif
4753 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4754 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4756 if (bp->port.port_stx) {
4758 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4759 if (bp->func_stx)
4760 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4761 else
4762 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4763 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4764 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4765 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4766 dmae->dst_addr_hi = 0;
4767 dmae->len = sizeof(struct host_port_stats) >> 2;
4768 if (bp->func_stx) {
4769 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4770 dmae->comp_addr_hi = 0;
4771 dmae->comp_val = 1;
4772 } else {
4773 dmae->comp_addr_lo =
4774 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4775 dmae->comp_addr_hi =
4776 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4777 dmae->comp_val = DMAE_COMP_VAL;
4785 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4786 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4787 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4788 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4789 dmae->dst_addr_lo = bp->func_stx >> 2;
4790 dmae->dst_addr_hi = 0;
4791 dmae->len = sizeof(struct host_func_stats) >> 2;
4792 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4793 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4794 dmae->comp_val = DMAE_COMP_VAL;
4800 static void bnx2x_stats_stop(struct bnx2x *bp)
4802 int update = 0;
4804 bnx2x_stats_comp(bp);
4806 if (bp->port.pmf)
4807 update = (bnx2x_hw_stats_update(bp) == 0);
4809 update |= (bnx2x_storm_stats_update(bp) == 0);
4811 if (update) {
4812 bnx2x_net_stats_update(bp);
4814 if (bp->port.pmf)
4815 bnx2x_port_stats_stop(bp);
4817 bnx2x_hw_stats_post(bp);
4818 bnx2x_stats_comp(bp);
4822 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4826 static const struct {
4827 void (*action)(struct bnx2x *bp);
4828 enum bnx2x_stats_state next_state;
4829 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4832 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4833 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4834 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4835 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4838 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4839 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4840 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4841 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4845 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4847 enum bnx2x_stats_state state = bp->stats_state;
4849 if (unlikely(bp->panic))
4850 return;
4852 bnx2x_stats_stm[state][event].action(bp);
4853 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4855 /* Make sure the state has been "changed" */
4858 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4859 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4860 state, event, bp->stats_state);
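/*
 * Usage sketch for the state machine above: events index straight into
 * bnx2x_stats_stm, so e.g. a link-up on a disabled function runs
 * bnx2x_stats_start() and moves to STATS_STATE_ENABLED, after which the
 * periodic timer keeps feeding it updates:
 */
#if 0
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); /* DISABLED -> ENABLED */
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); /* stays ENABLED */
#endif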
4863 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4865 struct dmae_command *dmae;
4866 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4869 if (!bp->port.pmf || !bp->port.port_stx) {
4870 BNX2X_ERR("BUG!\n");
4871 return;
4872 }
4874 bp->executer_idx = 0;
4876 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4877 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4878 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4879 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4880 #ifdef __BIG_ENDIAN
4881 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4882 #else
4883 DMAE_CMD_ENDIANITY_DW_SWAP |
4884 #endif
4885 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4886 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4887 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4888 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4889 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4890 dmae->dst_addr_hi = 0;
4891 dmae->len = sizeof(struct host_port_stats) >> 2;
4892 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4893 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4894 dmae->comp_val = DMAE_COMP_VAL;
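/*
 * DMAE completion convention used by all the statistics commands here:
 * the engine writes DMAE_COMP_VAL to comp_addr when the transfer is
 * done, and bnx2x_stats_comp() polls that word. Simplified sketch of
 * the wait (the real helper also bounds the loop and warns on timeout):
 */
#if 0
u32 *comp = bnx2x_sp(bp, stats_comp);

*comp = 0; /* arm before posting the command */
bnx2x_hw_stats_post(bp);
while (*comp != DMAE_COMP_VAL)
msleep(1); /* transfer still in flight */
#endif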
4897 bnx2x_hw_stats_post(bp);
4898 bnx2x_stats_comp(bp);
4901 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4903 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4904 int port = BP_PORT(bp);
4905 int func;
4906 u32 func_stx;
4909 if (!bp->port.pmf || !bp->func_stx) {
4910 BNX2X_ERR("BUG!\n");
4911 return;
4912 }
4914 /* save our func_stx */
4915 func_stx = bp->func_stx;
4917 for (vn = VN_0; vn < vn_max; vn++) {
4918 func = 2*vn + port;
4920 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4921 bnx2x_func_stats_init(bp);
4922 bnx2x_hw_stats_post(bp);
4923 bnx2x_stats_comp(bp);
4926 /* restore our func_stx */
4927 bp->func_stx = func_stx;
4930 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4932 struct dmae_command *dmae = &bp->stats_dmae;
4933 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4936 if (!bp->func_stx) {
4937 BNX2X_ERR("BUG!\n");
4938 return;
4939 }
4941 bp->executer_idx = 0;
4942 memset(dmae, 0, sizeof(struct dmae_command));
4944 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4945 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4946 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4947 #ifdef __BIG_ENDIAN
4948 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4949 #else
4950 DMAE_CMD_ENDIANITY_DW_SWAP |
4951 #endif
4952 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4953 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4954 dmae->src_addr_lo = bp->func_stx >> 2;
4955 dmae->src_addr_hi = 0;
4956 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4957 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4958 dmae->len = sizeof(struct host_func_stats) >> 2;
4959 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4960 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4961 dmae->comp_val = DMAE_COMP_VAL;
4964 bnx2x_hw_stats_post(bp);
4965 bnx2x_stats_comp(bp);
4968 static void bnx2x_stats_init(struct bnx2x *bp)
4970 int port = BP_PORT(bp);
4971 int func = BP_FUNC(bp);
4974 bp->stats_pending = 0;
4975 bp->executer_idx = 0;
4976 bp->stats_counter = 0;
4978 /* port and func stats for management */
4979 if (!BP_NOMCP(bp)) {
4980 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4981 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4983 } else {
4984 bp->port.port_stx = 0;
4985 bp->func_stx = 0;
4986 }
4987 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4988 bp->port.port_stx, bp->func_stx);
4991 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4992 bp->port.old_nig_stats.brb_discard =
4993 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4994 bp->port.old_nig_stats.brb_truncate =
4995 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4996 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4997 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4998 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4999 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5001 /* function stats */
5002 for_each_queue(bp, i) {
5003 struct bnx2x_fastpath *fp = &bp->fp[i];
5005 memset(&fp->old_tclient, 0,
5006 sizeof(struct tstorm_per_client_stats));
5007 memset(&fp->old_uclient, 0,
5008 sizeof(struct ustorm_per_client_stats));
5009 memset(&fp->old_xclient, 0,
5010 sizeof(struct xstorm_per_client_stats));
5011 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5014 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5015 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5017 bp->stats_state = STATS_STATE_DISABLED;
5019 if (bp->port.pmf) {
5020 if (bp->port.port_stx)
5021 bnx2x_port_stats_base_init(bp);
5023 if (bp->func_stx)
5024 bnx2x_func_stats_base_init(bp);
5026 } else if (bp->func_stx)
5027 bnx2x_func_stats_base_update(bp);
5030 static void bnx2x_timer(unsigned long data)
5032 struct bnx2x *bp = (struct bnx2x *) data;
5034 if (!netif_running(bp->dev))
5037 if (atomic_read(&bp->intr_sem) != 0)
5041 struct bnx2x_fastpath *fp = &bp->fp[0];
5045 rc = bnx2x_rx_int(fp, 1000);
5048 if (!BP_NOMCP(bp)) {
5049 int func = BP_FUNC(bp);
5053 ++bp->fw_drv_pulse_wr_seq;
5054 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5055 /* TBD - add SYSTEM_TIME */
5056 drv_pulse = bp->fw_drv_pulse_wr_seq;
5057 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5059 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5060 MCP_PULSE_SEQ_MASK);
5061 /* The delta between driver pulse and mcp response
5062 * should be 1 (before mcp response) or 0 (after mcp response) */
5064 if ((drv_pulse != mcp_pulse) &&
5065 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5066 /* someone lost a heartbeat... */
5067 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5068 drv_pulse, mcp_pulse);
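/* Worked example of the allowed window above, assuming a 15-bit pulse
 * mask (0x7fff): drv_pulse 0x0000 against mcp_pulse 0x7fff is healthy,
 * since (0x7fff + 1) & 0x7fff wraps to 0x0000, while a gap of two or
 * more ticks means a heartbeat was lost. */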
5072 if (bp->state == BNX2X_STATE_OPEN)
5073 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5076 mod_timer(&bp->timer, jiffies + bp->current_interval);
5079 /* end of Statistics */
5083 /*
5084 * nic init service functions
5085 */
5087 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5089 int port = BP_PORT(bp);
5092 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5093 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5094 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5095 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5096 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5097 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5100 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5101 dma_addr_t mapping, int sb_id)
5103 int port = BP_PORT(bp);
5104 int func = BP_FUNC(bp);
5109 section = ((u64)mapping) + offsetof(struct host_status_block,
5110 u_status_block);
5111 sb->u_status_block.status_block_id = sb_id;
5113 REG_WR(bp, BAR_CSTRORM_INTMEM +
5114 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5115 REG_WR(bp, BAR_CSTRORM_INTMEM +
5116 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5117 U64_HI(section));
5118 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5119 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5121 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5122 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5123 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5126 section = ((u64)mapping) + offsetof(struct host_status_block,
5127 c_status_block);
5128 sb->c_status_block.status_block_id = sb_id;
5130 REG_WR(bp, BAR_CSTRORM_INTMEM +
5131 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5132 REG_WR(bp, BAR_CSTRORM_INTMEM +
5133 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5134 U64_HI(section));
5135 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5136 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5138 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5139 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5140 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5142 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5145 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5147 int func = BP_FUNC(bp);
5149 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5150 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5151 sizeof(struct tstorm_def_status_block)/4);
5152 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5153 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5154 sizeof(struct cstorm_def_status_block_u)/4);
5155 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5156 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5157 sizeof(struct cstorm_def_status_block_c)/4);
5158 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5159 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5160 sizeof(struct xstorm_def_status_block)/4);
5163 static void bnx2x_init_def_sb(struct bnx2x *bp,
5164 struct host_def_status_block *def_sb,
5165 dma_addr_t mapping, int sb_id)
5167 int port = BP_PORT(bp);
5168 int func = BP_FUNC(bp);
5169 int index, val, reg_offset;
5173 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5174 atten_status_block);
5175 def_sb->atten_status_block.status_block_id = sb_id;
5179 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5180 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5182 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5183 bp->attn_group[index].sig[0] = REG_RD(bp,
5184 reg_offset + 0x10*index);
5185 bp->attn_group[index].sig[1] = REG_RD(bp,
5186 reg_offset + 0x4 + 0x10*index);
5187 bp->attn_group[index].sig[2] = REG_RD(bp,
5188 reg_offset + 0x8 + 0x10*index);
5189 bp->attn_group[index].sig[3] = REG_RD(bp,
5190 reg_offset + 0xc + 0x10*index);
5193 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5194 HC_REG_ATTN_MSG0_ADDR_L);
5196 REG_WR(bp, reg_offset, U64_LO(section));
5197 REG_WR(bp, reg_offset + 4, U64_HI(section));
5199 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5201 val = REG_RD(bp, reg_offset);
5202 val |= sb_id;
5203 REG_WR(bp, reg_offset, val);
5206 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5207 u_def_status_block);
5208 def_sb->u_def_status_block.status_block_id = sb_id;
5210 REG_WR(bp, BAR_CSTRORM_INTMEM +
5211 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5212 REG_WR(bp, BAR_CSTRORM_INTMEM +
5213 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5214 U64_HI(section));
5215 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5216 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5218 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5219 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5220 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5223 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5224 c_def_status_block);
5225 def_sb->c_def_status_block.status_block_id = sb_id;
5227 REG_WR(bp, BAR_CSTRORM_INTMEM +
5228 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5229 REG_WR(bp, BAR_CSTRORM_INTMEM +
5230 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5231 U64_HI(section));
5232 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5233 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5235 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5236 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5237 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5240 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5241 t_def_status_block);
5242 def_sb->t_def_status_block.status_block_id = sb_id;
5244 REG_WR(bp, BAR_TSTRORM_INTMEM +
5245 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5246 REG_WR(bp, BAR_TSTRORM_INTMEM +
5247 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5248 U64_HI(section));
5249 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5250 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5252 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5253 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5254 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5257 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5258 x_def_status_block);
5259 def_sb->x_def_status_block.status_block_id = sb_id;
5261 REG_WR(bp, BAR_XSTRORM_INTMEM +
5262 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5263 REG_WR(bp, BAR_XSTRORM_INTMEM +
5264 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5265 U64_HI(section));
5266 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5267 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5269 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5270 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5271 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5273 bp->stats_pending = 0;
5274 bp->set_mac_pending = 0;
5276 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5279 static void bnx2x_update_coalesce(struct bnx2x *bp)
5281 int port = BP_PORT(bp);
5284 for_each_queue(bp, i) {
5285 int sb_id = bp->fp[i].sb_id;
5287 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5288 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5289 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5290 U_SB_ETH_RX_CQ_INDEX),
5291 bp->rx_ticks/(4 * BNX2X_BTR));
5292 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5293 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5294 U_SB_ETH_RX_CQ_INDEX),
5295 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5297 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5298 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5299 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5300 C_SB_ETH_TX_CQ_INDEX),
5301 bp->tx_ticks/(4 * BNX2X_BTR));
5302 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5303 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5304 C_SB_ETH_TX_CQ_INDEX),
5305 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
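/* Worked example of the scaling above, taking BNX2X_BTR as 1 purely for
 * illustration: rx_ticks = 50 stores 50/(4*1) = 12 timeout units, while
 * any value below 4 stores 0 and simultaneously sets the companion
 * HC_DISABLE word, turning coalescing off for that index. */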
5309 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5310 struct bnx2x_fastpath *fp, int last)
5314 for (i = 0; i < last; i++) {
5315 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5316 struct sk_buff *skb = rx_buf->skb;
5318 if (skb == NULL) {
5319 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5320 continue;
5321 }
5323 if (fp->tpa_state[i] == BNX2X_TPA_START)
5324 dma_unmap_single(&bp->pdev->dev,
5325 dma_unmap_addr(rx_buf, mapping),
5326 bp->rx_buf_size, DMA_FROM_DEVICE);
5328 dev_kfree_skb(skb);
5329 rx_buf->skb = NULL;
5333 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5335 int func = BP_FUNC(bp);
5336 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5337 ETH_MAX_AGGREGATION_QUEUES_E1H;
5338 u16 ring_prod, cqe_ring_prod;
5341 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5342 DP(NETIF_MSG_IFUP,
5343 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5345 if (bp->flags & TPA_ENABLE_FLAG) {
5347 for_each_queue(bp, j) {
5348 struct bnx2x_fastpath *fp = &bp->fp[j];
5350 for (i = 0; i < max_agg_queues; i++) {
5351 fp->tpa_pool[i].skb =
5352 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5353 if (!fp->tpa_pool[i].skb) {
5354 BNX2X_ERR("Failed to allocate TPA "
5355 "skb pool for queue[%d] - "
5356 "disabling TPA on this "
5357 "queue!\n", j);
5358 bnx2x_free_tpa_pool(bp, fp, i);
5359 fp->disable_tpa = 1;
5362 dma_unmap_addr_set((struct sw_rx_bd *)
5363 &bp->fp->tpa_pool[i],
5365 fp->tpa_state[i] = BNX2X_TPA_STOP;
5370 for_each_queue(bp, j) {
5371 struct bnx2x_fastpath *fp = &bp->fp[j];
5374 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5375 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5377 /* "next page" elements initialization */
5379 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5380 struct eth_rx_sge *sge;
5382 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5383 sge->addr_hi =
5384 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5385 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5386 sge->addr_lo =
5387 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5388 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5391 bnx2x_init_sge_ring_bit_mask(fp);
5394 for (i = 1; i <= NUM_RX_RINGS; i++) {
5395 struct eth_rx_bd *rx_bd;
5397 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5398 rx_bd->addr_hi =
5399 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5400 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5401 rx_bd->addr_lo =
5402 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5403 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
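/* Ring-chaining example: the reserved trailing descriptors of each page
 * point at page (i % NUM_RX_RINGS), so page 0 links to page 1, page 1
 * to page 2, and the final page links back to page 0, closing the
 * circle the hardware walks. */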
5407 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5408 struct eth_rx_cqe_next_page *nextpg;
5410 nextpg = (struct eth_rx_cqe_next_page *)
5411 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5412 nextpg->addr_hi =
5413 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5414 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5415 nextpg->addr_lo =
5416 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5417 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5420 /* Allocate SGEs and initialize the ring elements */
5421 for (i = 0, ring_prod = 0;
5422 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5424 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5425 BNX2X_ERR("was only able to allocate "
5426 "%d rx sges\n", i);
5427 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5428 /* Cleanup already allocated elements */
5429 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5430 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5431 fp->disable_tpa = 1;
5435 ring_prod = NEXT_SGE_IDX(ring_prod);
5437 fp->rx_sge_prod = ring_prod;
5439 /* Allocate BDs and initialize BD ring */
5440 fp->rx_comp_cons = 0;
5441 cqe_ring_prod = ring_prod = 0;
5442 for (i = 0; i < bp->rx_ring_size; i++) {
5443 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5444 BNX2X_ERR("was only able to allocate "
5445 "%d rx skbs on queue[%d]\n", i, j);
5446 fp->eth_q_stats.rx_skb_alloc_failed++;
5449 ring_prod = NEXT_RX_IDX(ring_prod);
5450 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5451 WARN_ON(ring_prod <= i);
5454 fp->rx_bd_prod = ring_prod;
5455 /* must not have more available CQEs than BDs */
5456 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5457 cqe_ring_prod);
5458 fp->rx_pkt = fp->rx_calls = 0;
5460 /* Warning!
5461 * this will generate an interrupt (to the TSTORM)
5462 * must only be done after chip is initialized */
5464 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5465 fp->rx_sge_prod);
5469 REG_WR(bp, BAR_USTRORM_INTMEM +
5470 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5471 U64_LO(fp->rx_comp_mapping));
5472 REG_WR(bp, BAR_USTRORM_INTMEM +
5473 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5474 U64_HI(fp->rx_comp_mapping));
5478 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5482 for_each_queue(bp, j) {
5483 struct bnx2x_fastpath *fp = &bp->fp[j];
5485 for (i = 1; i <= NUM_TX_RINGS; i++) {
5486 struct eth_tx_next_bd *tx_next_bd =
5487 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5489 tx_next_bd->addr_hi =
5490 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5491 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5492 tx_next_bd->addr_lo =
5493 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5494 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5497 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5498 fp->tx_db.data.zero_fill1 = 0;
5499 fp->tx_db.data.prod = 0;
5501 fp->tx_pkt_prod = 0;
5502 fp->tx_pkt_cons = 0;
5505 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5510 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5512 int func = BP_FUNC(bp);
5514 spin_lock_init(&bp->spq_lock);
5516 bp->spq_left = MAX_SPQ_PENDING;
5517 bp->spq_prod_idx = 0;
5518 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5519 bp->spq_prod_bd = bp->spq;
5520 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5522 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5523 U64_LO(bp->spq_mapping));
5524 REG_WR(bp,
5525 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5526 U64_HI(bp->spq_mapping));
5528 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5529 bp->spq_prod_idx);
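/*
 * Slowpath queue sketch: bnx2x_sp_post() (elsewhere in this file)
 * advances the producer initialized above, wrapping at spq_last_bd,
 * and then rings the XSTORM producer offset written here. Illustrative
 * advance:
 */
#if 0
if (bp->spq_prod_bd == bp->spq_last_bd) {
bp->spq_prod_bd = bp->spq; /* wrap to the ring base */
bp->spq_prod_idx = 0;
} else {
bp->spq_prod_bd++;
bp->spq_prod_idx++;
}
#endif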
5532 static void bnx2x_init_context(struct bnx2x *bp)
5537 for_each_queue(bp, i) {
5538 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5539 struct bnx2x_fastpath *fp = &bp->fp[i];
5540 u8 cl_id = fp->cl_id;
5542 context->ustorm_st_context.common.sb_index_numbers =
5543 BNX2X_RX_SB_INDEX_NUM;
5544 context->ustorm_st_context.common.clientId = cl_id;
5545 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5546 context->ustorm_st_context.common.flags =
5547 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5548 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5549 context->ustorm_st_context.common.statistics_counter_id =
5550 cl_id;
5551 context->ustorm_st_context.common.mc_alignment_log_size =
5552 BNX2X_RX_ALIGN_SHIFT;
5553 context->ustorm_st_context.common.bd_buff_size =
5554 bp->rx_buf_size;
5555 context->ustorm_st_context.common.bd_page_base_hi =
5556 U64_HI(fp->rx_desc_mapping);
5557 context->ustorm_st_context.common.bd_page_base_lo =
5558 U64_LO(fp->rx_desc_mapping);
5559 if (!fp->disable_tpa) {
5560 context->ustorm_st_context.common.flags |=
5561 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5562 context->ustorm_st_context.common.sge_buff_size =
5563 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5564 0xffff);
5565 context->ustorm_st_context.common.sge_page_base_hi =
5566 U64_HI(fp->rx_sge_mapping);
5567 context->ustorm_st_context.common.sge_page_base_lo =
5568 U64_LO(fp->rx_sge_mapping);
5570 context->ustorm_st_context.common.max_sges_for_packet =
5571 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5572 context->ustorm_st_context.common.max_sges_for_packet =
5573 ((context->ustorm_st_context.common.
5574 max_sges_for_packet + PAGES_PER_SGE - 1) &
5575 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5578 context->ustorm_ag_context.cdu_usage =
5579 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5580 CDU_REGION_NUMBER_UCM_AG,
5581 ETH_CONNECTION_TYPE);
5583 context->xstorm_ag_context.cdu_reserved =
5584 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5585 CDU_REGION_NUMBER_XCM_AG,
5586 ETH_CONNECTION_TYPE);
5590 for_each_queue(bp, i) {
5591 struct bnx2x_fastpath *fp = &bp->fp[i];
5592 struct eth_context *context =
5593 bnx2x_sp(bp, context[i].eth);
5595 context->cstorm_st_context.sb_index_number =
5596 C_SB_ETH_TX_CQ_INDEX;
5597 context->cstorm_st_context.status_block_id = fp->sb_id;
5599 context->xstorm_st_context.tx_bd_page_base_hi =
5600 U64_HI(fp->tx_desc_mapping);
5601 context->xstorm_st_context.tx_bd_page_base_lo =
5602 U64_LO(fp->tx_desc_mapping);
5603 context->xstorm_st_context.statistics_data = (fp->cl_id |
5604 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5608 static void bnx2x_init_ind_table(struct bnx2x *bp)
5610 int func = BP_FUNC(bp);
5613 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5614 return;
5616 DP(NETIF_MSG_IFUP,
5617 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5618 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5619 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5620 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5621 bp->fp->cl_id + (i % bp->num_queues));
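/* Indirection example: each of the TSTORM_INDIRECTION_TABLE_SIZE slots
 * maps an RSS hash bucket to a client, and the modulo above spreads the
 * buckets round-robin. With 4 queues and a leading cl_id of 0 the table
 * reads 0,1,2,3,0,1,2,3,... so bucket b lands on queue b % 4. */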
5624 static void bnx2x_set_client_config(struct bnx2x *bp)
5626 struct tstorm_eth_client_config tstorm_client = {0};
5627 int port = BP_PORT(bp);
5630 tstorm_client.mtu = bp->dev->mtu;
5631 tstorm_client.config_flags =
5632 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5633 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5635 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5636 tstorm_client.config_flags |=
5637 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5638 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5642 for_each_queue(bp, i) {
5643 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5645 REG_WR(bp, BAR_TSTRORM_INTMEM +
5646 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5647 ((u32 *)&tstorm_client)[0]);
5648 REG_WR(bp, BAR_TSTRORM_INTMEM +
5649 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5650 ((u32 *)&tstorm_client)[1]);
5653 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5654 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5657 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5659 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5660 int mode = bp->rx_mode;
5661 int mask = bp->rx_mode_cl_mask;
5662 int func = BP_FUNC(bp);
5663 int port = BP_PORT(bp);
5665 /* All but management unicast packets should pass to the host as well */
5666 u32 llh_mask =
5667 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5668 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5669 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5670 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5672 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5674 switch (mode) {
5675 case BNX2X_RX_MODE_NONE: /* no Rx */
5676 tstorm_mac_filter.ucast_drop_all = mask;
5677 tstorm_mac_filter.mcast_drop_all = mask;
5678 tstorm_mac_filter.bcast_drop_all = mask;
5679 break;
5681 case BNX2X_RX_MODE_NORMAL:
5682 tstorm_mac_filter.bcast_accept_all = mask;
5683 break;
5685 case BNX2X_RX_MODE_ALLMULTI:
5686 tstorm_mac_filter.mcast_accept_all = mask;
5687 tstorm_mac_filter.bcast_accept_all = mask;
5688 break;
5690 case BNX2X_RX_MODE_PROMISC:
5691 tstorm_mac_filter.ucast_accept_all = mask;
5692 tstorm_mac_filter.mcast_accept_all = mask;
5693 tstorm_mac_filter.bcast_accept_all = mask;
5694 /* pass management unicast packets as well */
5695 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5696 break;
5698 default:
5699 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5700 break;
5701 }
5703 REG_WR(bp,
5704 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5705 llh_mask);
5707 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5708 REG_WR(bp, BAR_TSTRORM_INTMEM +
5709 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5710 ((u32 *)&tstorm_mac_filter)[i]);
5712 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5713 ((u32 *)&tstorm_mac_filter)[i]); */
5716 if (mode != BNX2X_RX_MODE_NONE)
5717 bnx2x_set_client_config(bp);
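/* Example of the resulting filter block: for BNX2X_RX_MODE_ALLMULTI the
 * ucast_* words stay 0 (unicast still passes only on a MAC match) while
 * mcast_accept_all and bcast_accept_all carry the per-client mask, e.g.
 * 0x1 when BP_L_ID(bp) == 0; the fields are client bitmaps, not
 * booleans. */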
5720 static void bnx2x_init_internal_common(struct bnx2x *bp)
5724 /* Zero this manually as its initialization is
5725 currently missing in the initTool */
5726 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5727 REG_WR(bp, BAR_USTRORM_INTMEM +
5728 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5731 static void bnx2x_init_internal_port(struct bnx2x *bp)
5733 int port = BP_PORT(bp);
5735 REG_WR(bp,
5736 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5737 REG_WR(bp,
5738 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5739 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5740 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5743 static void bnx2x_init_internal_func(struct bnx2x *bp)
5745 struct tstorm_eth_function_common_config tstorm_config = {0};
5746 struct stats_indication_flags stats_flags = {0};
5747 int port = BP_PORT(bp);
5748 int func = BP_FUNC(bp);
5754 tstorm_config.config_flags = MULTI_FLAGS(bp);
5755 tstorm_config.rss_result_mask = MULTI_MASK;
5758 /* Enable TPA if needed */
5759 if (bp->flags & TPA_ENABLE_FLAG)
5760 tstorm_config.config_flags |=
5761 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5763 if (IS_E1HMF(bp))
5764 tstorm_config.config_flags |=
5765 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5767 tstorm_config.leading_client_id = BP_L_ID(bp);
5769 REG_WR(bp, BAR_TSTRORM_INTMEM +
5770 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5771 (*(u32 *)&tstorm_config));
5773 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5774 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5775 bnx2x_set_storm_rx_mode(bp);
5777 for_each_queue(bp, i) {
5778 u8 cl_id = bp->fp[i].cl_id;
5780 /* reset xstorm per client statistics */
5781 offset = BAR_XSTRORM_INTMEM +
5782 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5783 for (j = 0;
5784 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5785 REG_WR(bp, offset + j*4, 0);
5787 /* reset tstorm per client statistics */
5788 offset = BAR_TSTRORM_INTMEM +
5789 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5790 for (j = 0;
5791 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5792 REG_WR(bp, offset + j*4, 0);
5794 /* reset ustorm per client statistics */
5795 offset = BAR_USTRORM_INTMEM +
5796 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5797 for (j = 0;
5798 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5799 REG_WR(bp, offset + j*4, 0);
5802 /* Init statistics related context */
5803 stats_flags.collect_eth = 1;
5805 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5806 ((u32 *)&stats_flags)[0]);
5807 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5808 ((u32 *)&stats_flags)[1]);
5810 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5811 ((u32 *)&stats_flags)[0]);
5812 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5813 ((u32 *)&stats_flags)[1]);
5815 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5816 ((u32 *)&stats_flags)[0]);
5817 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5818 ((u32 *)&stats_flags)[1]);
5820 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5821 ((u32 *)&stats_flags)[0]);
5822 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5823 ((u32 *)&stats_flags)[1]);
5825 REG_WR(bp, BAR_XSTRORM_INTMEM +
5826 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5827 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5828 REG_WR(bp, BAR_XSTRORM_INTMEM +
5829 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5830 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5832 REG_WR(bp, BAR_TSTRORM_INTMEM +
5833 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5834 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5835 REG_WR(bp, BAR_TSTRORM_INTMEM +
5836 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5837 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5839 REG_WR(bp, BAR_USTRORM_INTMEM +
5840 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5841 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5842 REG_WR(bp, BAR_USTRORM_INTMEM +
5843 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5844 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5846 if (CHIP_IS_E1H(bp)) {
5847 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5848 IS_E1HMF(bp));
5849 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5850 IS_E1HMF(bp));
5851 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5852 IS_E1HMF(bp));
5853 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5854 IS_E1HMF(bp));
5856 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5857 bp->e1hov);
5860 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5861 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5862 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5863 for_each_queue(bp, i) {
5864 struct bnx2x_fastpath *fp = &bp->fp[i];
5866 REG_WR(bp, BAR_USTRORM_INTMEM +
5867 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5868 U64_LO(fp->rx_comp_mapping));
5869 REG_WR(bp, BAR_USTRORM_INTMEM +
5870 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5871 U64_HI(fp->rx_comp_mapping));
5874 REG_WR(bp, BAR_USTRORM_INTMEM +
5875 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5876 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5877 REG_WR(bp, BAR_USTRORM_INTMEM +
5878 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5879 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5881 REG_WR16(bp, BAR_USTRORM_INTMEM +
5882 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5883 max_agg_size);
5886 /* dropless flow control */
5887 if (CHIP_IS_E1H(bp)) {
5888 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5890 rx_pause.bd_thr_low = 250;
5891 rx_pause.cqe_thr_low = 250;
5893 rx_pause.sge_thr_low = 0;
5894 rx_pause.bd_thr_high = 350;
5895 rx_pause.cqe_thr_high = 350;
5896 rx_pause.sge_thr_high = 0;
5898 for_each_queue(bp, i) {
5899 struct bnx2x_fastpath *fp = &bp->fp[i];
5901 if (!fp->disable_tpa) {
5902 rx_pause.sge_thr_low = 150;
5903 rx_pause.sge_thr_high = 250;
5907 offset = BAR_USTRORM_INTMEM +
5908 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5909 fp->cl_id);
5910 for (j = 0;
5911 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5912 j++)
5913 REG_WR(bp, offset + j*4,
5914 ((u32 *)&rx_pause)[j]);
5918 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5920 /* Init rate shaping and fairness contexts */
5921 if (IS_E1HMF(bp)) {
5922 int vn;
5924 /* During init there is no active link.
5925 Until link is up, set link rate to 10Gbps */
5926 bp->link_vars.line_speed = SPEED_10000;
5927 bnx2x_init_port_minmax(bp);
5930 bp->mf_config =
5931 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5932 bnx2x_calc_vn_weight_sum(bp);
5934 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5935 bnx2x_init_vn_minmax(bp, 2*vn + port);
5937 /* Enable rate shaping and fairness */
5938 bp->cmng.flags.cmng_enables |=
5939 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5942 } else {
5943 /* rate shaping and fairness are disabled */
5944 DP(NETIF_MSG_IFUP, "single function mode minmax will be disabled\n");
5945 }
5948 /* Store cmng structures to internal memory */
5950 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5951 REG_WR(bp, BAR_XSTRORM_INTMEM +
5952 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5953 ((u32 *)(&bp->cmng))[i]);
5956 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5958 switch (load_code) {
5959 case FW_MSG_CODE_DRV_LOAD_COMMON:
5960 bnx2x_init_internal_common(bp);
5961 /* no break */
5963 case FW_MSG_CODE_DRV_LOAD_PORT:
5964 bnx2x_init_internal_port(bp);
5965 /* no break */
5967 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5968 bnx2x_init_internal_func(bp);
5969 break;
5971 default:
5972 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5977 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5981 for_each_queue(bp, i) {
5982 struct bnx2x_fastpath *fp = &bp->fp[i];
5985 fp->state = BNX2X_FP_STATE_CLOSED;
5987 fp->cl_id = BP_L_ID(bp) + i;
5988 #ifdef BCM_CNIC
5989 fp->sb_id = fp->cl_id + 1;
5990 #else
5991 fp->sb_id = fp->cl_id;
5992 #endif
5993 DP(NETIF_MSG_IFUP,
5994 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5995 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5996 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5997 fp->sb_id);
5998 bnx2x_update_fpsb_idx(fp);
6001 /* ensure status block indices were read */
6002 rmb();
6005 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6006 DEF_SB_ID);
6007 bnx2x_update_dsb_idx(bp);
6008 bnx2x_update_coalesce(bp);
6009 bnx2x_init_rx_rings(bp);
6010 bnx2x_init_tx_ring(bp);
6011 bnx2x_init_sp_ring(bp);
6012 bnx2x_init_context(bp);
6013 bnx2x_init_internal(bp, load_code);
6014 bnx2x_init_ind_table(bp);
6015 bnx2x_stats_init(bp);
6017 /* At this point, we are ready for interrupts */
6018 atomic_set(&bp->intr_sem, 0);
6020 /* flush all before enabling interrupts */
6021 mb();
6022 mmiowb();
6024 bnx2x_int_enable(bp);
6026 /* Check for SPIO5 */
6027 bnx2x_attn_int_deasserted0(bp,
6028 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6029 AEU_INPUTS_ATTN_BITS_SPIO5);
6032 /* end of nic init */
6034 /*
6035 * gzip service functions
6036 */
6038 static int bnx2x_gunzip_init(struct bnx2x *bp)
6040 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6041 &bp->gunzip_mapping, GFP_KERNEL);
6042 if (bp->gunzip_buf == NULL)
6043 goto gunzip_nomem1;
6045 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6046 if (bp->strm == NULL)
6047 goto gunzip_nomem2;
6049 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6050 GFP_KERNEL);
6051 if (bp->strm->workspace == NULL)
6052 goto gunzip_nomem3;
6054 return 0;
6056 gunzip_nomem3:
6057 kfree(bp->strm);
6058 bp->strm = NULL;
6060 gunzip_nomem2:
6061 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6062 bp->gunzip_mapping);
6063 bp->gunzip_buf = NULL;
6065 gunzip_nomem1:
6066 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6067 " un-compression\n");
6068 return -ENOMEM;
6071 static void bnx2x_gunzip_end(struct bnx2x *bp)
6073 kfree(bp->strm->workspace);
6078 if (bp->gunzip_buf) {
6079 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6080 bp->gunzip_mapping);
6081 bp->gunzip_buf = NULL;
6085 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6089 /* check gzip header */
6090 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6091 BNX2X_ERR("Bad gzip header\n");
6098 n = 3;
6099 if (zbuf[3] & FNAME)
6100 while ((zbuf[n++] != 0) && (n < len));
6102 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6103 bp->strm->avail_in = len - n;
6104 bp->strm->next_out = bp->gunzip_buf;
6105 bp->strm->avail_out = FW_BUF_SIZE;
6107 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6111 rc = zlib_inflate(bp->strm, Z_FINISH);
6112 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6113 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6114 bp->strm->msg);
6116 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6117 if (bp->gunzip_outlen & 0x3)
6118 netdev_err(bp->dev, "Firmware decompression error:"
6119 " gunzip_outlen (%d) not aligned\n",
6120 bp->gunzip_outlen);
6121 bp->gunzip_outlen >>= 2;
6123 zlib_inflateEnd(bp->strm);
6125 if (rc == Z_STREAM_END)
6126 return 0;
6127 else
6128 return rc;
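/*
 * Usage sketch for the helper above (caller and buffer names are
 * illustrative, not the driver's actual loader code): inflate a
 * firmware section into bp->gunzip_buf, then consume the
 * bp->gunzip_outlen 32-bit words it produced:
 */
#if 0
rc = bnx2x_gunzip(bp, section_data, section_len);
if (rc == 0)
write_fw_words(bp, addr, bp->gunzip_buf, bp->gunzip_outlen); /* hypothetical */
#endif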
6131 /* nic load/unload */
6133 /*
6134 * General service functions
6135 */
6137 /* send a NIG loopback debug packet */
6138 static void bnx2x_lb_pckt(struct bnx2x *bp)
6140 u32 wb_write[3];
6142 /* Ethernet source and destination addresses */
6143 wb_write[0] = 0x55555555;
6144 wb_write[1] = 0x55555555;
6145 wb_write[2] = 0x20; /* SOP */
6146 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6148 /* NON-IP protocol */
6149 wb_write[0] = 0x09000000;
6150 wb_write[1] = 0x55555555;
6151 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6152 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6155 /* some of the internal memories
6156 * are not directly readable from the driver;
6157 * to test them we send debug packets */
6159 static int bnx2x_int_mem_test(struct bnx2x *bp)
6160 int factor;
6161 u32 count, i;
6162 u32 val = 0;
6165 if (CHIP_REV_IS_FPGA(bp))
6166 factor = 120;
6167 else if (CHIP_REV_IS_EMUL(bp))
6168 factor = 200;
6169 else
6170 factor = 1;
6172 DP(NETIF_MSG_HW, "start part1\n");
6174 /* Disable inputs of parser neighbor blocks */
6175 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6176 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6177 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6178 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6180 /* Write 0 to parser credits for CFC search request */
6181 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6183 /* send Ethernet packet */
6186 /* TODO: do we need to reset the NIG statistics here? */
6187 /* Wait until NIG register shows 1 packet of size 0x10 */
6188 count = 1000 * factor;
6191 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6192 val = *bnx2x_sp(bp, wb_data[0]);
6200 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6204 /* Wait until PRS register shows 1 packet */
6205 count = 1000 * factor;
6207 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6215 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6219 /* Reset and init BRB, PRS */
6220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6222 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6224 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6225 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6227 DP(NETIF_MSG_HW, "part2\n");
6229 /* Disable inputs of parser neighbor blocks */
6230 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6231 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6232 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6233 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6235 /* Write 0 to parser credits for CFC search request */
6236 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6238 /* send 10 Ethernet packets */
6239 for (i = 0; i < 10; i++)
6242 /* Wait until NIG register shows 10 + 1
6243 packets of size 11*0x10 = 0xb0 */
6244 count = 1000 * factor;
6247 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6248 val = *bnx2x_sp(bp, wb_data[0]);
6256 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6260 /* Wait until PRS register shows 2 packets */
6261 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6262 if (val != 2)
6263 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6265 /* Write 1 to parser credits for CFC search request */
6266 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6268 /* Wait until PRS register shows 3 packets */
6269 msleep(10 * factor);
6270 /* Wait until NIG register shows 1 packet of size 0x10 */
6271 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6272 if (val != 3)
6273 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6275 /* clear NIG EOP FIFO */
6276 for (i = 0; i < 11; i++)
6277 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6278 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6279 if (val != 1)
6280 BNX2X_ERR("clear of NIG failed\n");
6284 /* Reset and init BRB, PRS, NIG */
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6287 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6289 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6290 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6293 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6296 /* Enable inputs of parser neighbor blocks */
6297 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6298 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6299 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6300 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6302 DP(NETIF_MSG_HW, "done\n");
6307 static void enable_blocks_attention(struct bnx2x *bp)
6309 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6310 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6311 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6312 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6313 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6314 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6315 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6316 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6317 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6318 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6319 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6320 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6321 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6322 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6323 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6324 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6325 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6326 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6327 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6328 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6329 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6330 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6331 if (CHIP_REV_IS_FPGA(bp))
6332 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6334 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6335 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6336 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6337 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6338 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6339 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6340 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6341 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6342 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6343 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
6346 static const struct {
6347 u32 addr;
6348 u32 mask;
6349 } bnx2x_parity_mask[] = {
6350 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6351 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6352 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6353 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6354 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6355 {QM_REG_QM_PRTY_MASK, 0x0},
6356 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6357 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6358 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6359 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6360 {CDU_REG_CDU_PRTY_MASK, 0x0},
6361 {CFC_REG_CFC_PRTY_MASK, 0x0},
6362 {DBG_REG_DBG_PRTY_MASK, 0x0},
6363 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6364 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6365 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6366 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6367 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6369 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6370 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6371 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6372 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6373 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6374 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6375 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6376 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6377 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6380 static void enable_blocks_parity(struct bnx2x *bp)
6382 int i, mask_arr_len =
6383 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6385 for (i = 0; i < mask_arr_len; i++)
6386 REG_WR(bp, bnx2x_parity_mask[i].addr,
6387 bnx2x_parity_mask[i].mask);
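/*
 * Annotation (editorial, hedged): in the parity table above a mask value of
 * 0x0 appears to leave every parity source of a block enabled, while
 * 0xffffffff masks them all; the non-trivial masks (SRC bit 2, PRS bit 6,
 * the SDM bits 3-5) disable only the sources called out in the trailing
 * comments.
 */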
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
		MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a valid bit is
   added as the 53rd bit;
   since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
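/*
 * Annotation, worked example (values are illustrative only): for a DMA
 * address addr = 0x0000001234567000ULL,
 *	ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *	ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 * i.e. the low write carries address bits 43:12, and the high write
 * carries the valid bit (bit 20) plus address bits 63:44.
 */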
#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;
	int i;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
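/*
 * Annotation (editorial): the load_code switch above falls through on
 * purpose - a COMMON load initializes the common blocks, then the port,
 * then the function; a PORT load starts at the port stage; a FUNCTION
 * load runs only the per-function init. bp->dmae_ready flips to 1 once
 * the DMAE block is usable, so later stages can use DMAE-based writes.
 */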
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
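/*
 * Annotation (editorial): note the split between the two allocators above -
 * BNX2X_PCI_ALLOC hands out coherent DMA memory for structures the chip
 * touches (descriptor/completion rings, status blocks), while BNX2X_ALLOC
 * uses vmalloc() for host-only shadow arrays (sw_rx_bd/sw_tx_bd). The T2
 * initialization writes, into the last 8 bytes of each 64-byte searcher
 * element, the physical address of the next element, forming a hardware-
 * visible free list.
 */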
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}

		bp->num_queues = min(bp->num_queues, fp_vec);

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
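/*
 * Annotation (editorial, hedged): IRQF_SHARED is requested only for legacy
 * INTx; with MSI the vector is presumably exclusive to this device, so
 * flags are left at 0. This is the single-vector path - the MSI-X path
 * instead uses bnx2x_req_msix_irqs() with one vector for the slowpath plus
 * one per fastpath queue.
 */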
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
						cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
						cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
						cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
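/*
 * Annotation, worked example (illustrative only): for the MAC
 * 00:11:22:33:44:55 on a little-endian host, *(u16 *)&mac[0] reads 0x1100
 * and swab16() turns it into 0x0011, so msb/middle/lsb become
 * 0x0011/0x2233/0x4455 - the big-endian halfword layout the CAM expects.
 */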
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec,
					  (BP_PORT(bp) ? 32 : 0) + 2, 1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec,
					   E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}
static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}
#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		goto load_error4;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
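/*
 * Annotation (editorial): when the MCP (bootcode) is present it arbitrates
 * which driver instance performs COMMON/PORT/FUNCTION init via LOAD_REQ;
 * the BP_NOMCP() branch emulates that arbitration with the static
 * load_count[] array - the first function up loads COMMON, the first on a
 * port loads PORT, everyone else only FUNCTION. The load_error1..4 labels
 * unwind in the reverse order of the setup steps above.
 */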
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
}
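/*
 * Annotation (editorial): the reset_code chosen above encodes the WOL
 * policy reported to the MCP - WOL_DIS for a normal unload, WOL_MCP when
 * WOL is not supported, and WOL_EN after programming the MAC into EMAC
 * match entries 1-4 (entry 0 stays reserved for the PMF). Without an MCP,
 * the same load_count[] bookkeeping used at load time selects a
 * COMMON/PORT/FUNCTION unload.
 */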
static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
}
#define SHARED_MF_CLP_MAGIC	0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param pdev Device handle.
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	/* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
	   SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
		    (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
#define MCP_TIMEOUT	5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT	100	/* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
	}
}
8699 * Reset the whole chip except for:
8701 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8702 * one reset bit)
8704 * - MISC (including AEU)
8708 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8710 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8713 MISC_REGISTERS_RESET_REG_1_RST_HC |
8714 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8715 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8718 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8719 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8720 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8721 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8723 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8724 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8725 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8727 reset_mask1 = 0xffffffff;
8729 if (CHIP_IS_E1(bp))
8730 reset_mask2 = 0xffff;
8731 else
8732 reset_mask2 = 0x1ffff;
8734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8735 reset_mask1 & (~not_reset_mask1));
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8737 reset_mask2 & (~not_reset_mask2));
8742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
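/* Note on the SET/CLEAR pair above: as used throughout this driver,
 * writing a mask to a RESET_REG_*_CLEAR register puts the corresponding
 * blocks into reset, and writing it to RESET_REG_*_SET releases them
 * again; the not_reset masks keep the PCI glue, PXP and the MCP-related
 * blocks alive across the "process kill". */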
8747 static int bnx2x_process_kill(struct bnx2x *bp)
8751 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8754 /* Empty the Tetris buffer, wait for 1s */
8756 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8757 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8758 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8759 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8760 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8761 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8762 ((port_is_idle_0 & 0x1) == 0x1) &&
8763 ((port_is_idle_1 & 0x1) == 0x1) &&
8764 (pgl_exp_rom2 == 0xffffffff))
8767 } while (cnt-- > 0);
8770 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8771 " are still"
8772 " outstanding read requests after 1s!\n");
8773 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8774 " port_is_idle_0=0x%08x,"
8775 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8776 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8783 /* Close gates #2, #3 and #4 */
8784 bnx2x_set_234_gates(bp, true);
8786 /* TBD: Indicate that "process kill" is in progress to MCP */
8788 /* Clear "unprepared" bit */
8789 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8792 /* Make sure all is written to the chip before the reset */
8795 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8796 * PSWHST, GRC and PSWRD Tetris buffer.
8800 /* Prepare to chip reset: */
8802 bnx2x_reset_mcp_prep(bp, &val);
8808 /* reset the chip */
8809 bnx2x_process_kill_chip_reset(bp);
8812 /* Recover after reset: */
8814 if (bnx2x_reset_mcp_comp(bp, val))
8820 /* Open the gates #2, #3 and #4 */
8821 bnx2x_set_234_gates(bp, false);
8823 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
8824 * reset state, re-enable attentions. */
8829 static int bnx2x_leader_reset(struct bnx2x *bp)
8832 /* Try to recover after the failure */
8833 if (bnx2x_process_kill(bp)) {
8834 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8837 goto exit_leader_reset;
8840 /* Clear "reset is in progress" bit and update the driver state */
8841 bnx2x_set_reset_done(bp);
8842 bp->recovery_state = BNX2X_RECOVERY_DONE;
8846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8851 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8853 /* Assumption: runs under rtnl lock. This together with the fact
8854 * that it's called only from bnx2x_reset_task() ensure that it
8855 * will never be called when netif_running(bp->dev) is false.
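/* A sketch of the recovery protocol implemented below: the first function
 * to grab HW_LOCK_RESOURCE_RESERVED_08 becomes the leader; it waits for
 * the chip-wide load counter to drop to zero, performs the "process kill"
 * reset and reloads. Non-leaders wait for bnx2x_reset_is_done() and then
 * simply reload, or take over leadership if the former leader vanished. */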
8857 static void bnx2x_parity_recover(struct bnx2x *bp)
8859 DP(NETIF_MSG_HW, "Handling parity\n");
8861 switch (bp->recovery_state) {
8862 case BNX2X_RECOVERY_INIT:
8863 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8864 /* Try to get a LEADER_LOCK HW lock */
8865 if (bnx2x_trylock_hw_lock(bp,
8866 HW_LOCK_RESOURCE_RESERVED_08))
8867 bp->is_leader = 1;
8869 /* Stop the driver */
8870 /* If interface has been removed - break */
8871 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8872 return;
8874 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8875 /* Ensure "is_leader" and "recovery_state"
8876 * update values are seen on other CPUs
8881 case BNX2X_RECOVERY_WAIT:
8882 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8883 if (bp->is_leader) {
8884 u32 load_counter = bnx2x_get_load_cnt(bp);
8885 if (load_counter) {
8886 /* Wait until all other functions get
8887 * down. */
8889 schedule_delayed_work(&bp->reset_task,
8890 HZ/10);
8891 return;
8892 } else {
8893 /* If all other functions got down -
8894 * try to bring the chip back to
8895 * normal. In any case it's an exit
8896 * point for a leader.
8898 if (bnx2x_leader_reset(bp) ||
8899 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8900 printk(KERN_ERR"%s: Recovery "
8901 "has failed. Power cycle is "
8902 "needed.\n", bp->dev->name);
8903 /* Disconnect this device */
8904 netif_device_detach(bp->dev);
8905 /* Block ifup for all function
8906 * of this ASIC until
8907 * "process kill" or power
8910 bnx2x_set_reset_in_progress(bp);
8911 /* Shut down the power */
8912 bnx2x_set_power_state(bp,
8919 } else { /* non-leader */
8920 if (!bnx2x_reset_is_done(bp)) {
8921 /* Try to get a LEADER_LOCK HW lock as
8922 * long as a former leader may have
8923 * been unloaded by the user or
8924 * released leadership for some other reason.
8927 if (bnx2x_trylock_hw_lock(bp,
8928 HW_LOCK_RESOURCE_RESERVED_08)) {
8929 /* I'm a leader now! Restart a
8936 schedule_delayed_work(&bp->reset_task,
8937 HZ/10);
8938 return;
8940 } else { /* A leader has completed
8941 * the "process kill". It's an exit
8942 * point for a non-leader.
8944 bnx2x_nic_load(bp, LOAD_NORMAL);
8945 bp->recovery_state =
8946 BNX2X_RECOVERY_DONE;
8957 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8958 * scheduled on a general queue in order to prevent a deadlock.
8960 static void bnx2x_reset_task(struct work_struct *work)
8962 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8964 #ifdef BNX2X_STOP_ON_ERROR
8965 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8966 " so reset not done to allow debug dump,\n"
8967 KERN_ERR " you will need to reboot when done\n");
8973 if (!netif_running(bp->dev))
8974 goto reset_task_exit;
8976 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8977 bnx2x_parity_recover(bp);
8979 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8980 bnx2x_nic_load(bp, LOAD_NORMAL);
8987 /* end of nic load/unload */
8992 * Init service functions
8995 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8998 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8999 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9000 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9001 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9002 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9003 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9004 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9005 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9007 BNX2X_ERR("Unsupported function index: %d\n", func);
9012 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9014 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9016 /* Flush all outstanding writes */
9019 /* Pretend to be function 0 */
9021 /* Flush the GRC transaction (in the chip) */
9022 new_val = REG_RD(bp, reg);
9024 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9029 /* From now we are in the "like-E1" mode */
9030 bnx2x_int_disable(bp);
9032 /* Flush all outstanding writes */
9035 /* Restore the original function settings */
9036 REG_WR(bp, reg, orig_func);
9037 new_val = REG_RD(bp, reg);
9038 if (new_val != orig_func) {
9039 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9040 orig_func, new_val);
9045 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9047 if (CHIP_IS_E1H(bp))
9048 bnx2x_undi_int_disable_e1h(bp, func);
9050 bnx2x_int_disable(bp);
9053 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9057 /* Check if there is any driver already loaded */
9058 val = REG_RD(bp, MISC_REG_UNPREPARED);
9060 /* Check if it is the UNDI driver:
9061 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
9063 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9064 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9065 if (val == 0x7) {
9066 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9068 int func = BP_FUNC(bp);
9072 /* clear the UNDI indication */
9073 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9075 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9077 /* try unload UNDI on port 0 */
9078 bp->func = 0;
9079 bp->fw_seq =
9080 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9081 DRV_MSG_SEQ_NUMBER_MASK);
9082 reset_code = bnx2x_fw_command(bp, reset_code);
9084 /* if UNDI is loaded on the other port */
9085 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9087 /* send "DONE" for previous unload */
9088 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9090 /* unload UNDI on port 1 */
9091 bp->func = 1;
9092 bp->fw_seq =
9093 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9094 DRV_MSG_SEQ_NUMBER_MASK);
9095 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9097 bnx2x_fw_command(bp, reset_code);
9100 /* now it's safe to release the lock */
9101 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9103 bnx2x_undi_int_disable(bp, func);
9105 /* close input traffic and wait for it */
9106 /* Do not rcv packets to BRB */
9107 REG_WR(bp,
9108 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9109 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9110 /* Do not direct rcv packets that are not for MCP to
9111 * the BRB */
9112 REG_WR(bp,
9113 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9114 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9115 /* clear AEU */
9116 REG_WR(bp,
9117 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9118 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9121 /* save NIG port swap info */
9122 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9123 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9126 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9129 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9131 /* take the NIG out of reset and restore swap values */
9132 REG_WR(bp,
9133 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9134 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9135 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9136 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9138 /* send unload done to the MCP */
9139 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9141 /* restore our func and fw_seq */
9142 bp->func = func;
9143 bp->fw_seq =
9144 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9145 DRV_MSG_SEQ_NUMBER_MASK);
9148 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9152 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9154 u32 val, val2, val3, val4, id;
9157 /* Get the chip revision id and number. */
9158 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9159 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9160 id = ((val & 0xffff) << 16);
9161 val = REG_RD(bp, MISC_REG_CHIP_REV);
9162 id |= ((val & 0xf) << 12);
9163 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9164 id |= ((val & 0xff) << 4);
9165 val = REG_RD(bp, MISC_REG_BOND_ID);
9167 bp->common.chip_id = id;
9168 bp->link_params.chip_id = bp->common.chip_id;
9169 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9171 val = (REG_RD(bp, 0x2874) & 0x55);
9172 if ((bp->common.chip_id & 0x1) ||
9173 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9174 bp->flags |= ONE_PORT_FLAG;
9175 BNX2X_DEV_INFO("single port device\n");
9178 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9179 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9180 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9181 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9182 bp->common.flash_size, bp->common.flash_size);
9184 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9185 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9186 bp->link_params.shmem_base = bp->common.shmem_base;
9187 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9188 bp->common.shmem_base, bp->common.shmem2_base);
9190 if (!bp->common.shmem_base ||
9191 (bp->common.shmem_base < 0xA0000) ||
9192 (bp->common.shmem_base >= 0xC0000)) {
9193 BNX2X_DEV_INFO("MCP not active\n");
9194 bp->flags |= NO_MCP_FLAG;
9195 return;
9196 }
9198 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9199 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9200 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9201 BNX2X_ERROR("BAD MCP validity signature\n");
9203 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9204 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9206 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9207 SHARED_HW_CFG_LED_MODE_MASK) >>
9208 SHARED_HW_CFG_LED_MODE_SHIFT);
9210 bp->link_params.feature_config_flags = 0;
9211 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9212 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9213 bp->link_params.feature_config_flags |=
9214 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9216 bp->link_params.feature_config_flags &=
9217 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9219 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9220 bp->common.bc_ver = val;
9221 BNX2X_DEV_INFO("bc_ver %X\n", val);
9222 if (val < BNX2X_BC_VER) {
9223 /* for now only warn;
9224 * later we might need to enforce this */
9225 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9226 "please upgrade BC\n", BNX2X_BC_VER, val);
9228 bp->link_params.feature_config_flags |=
9229 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9230 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9232 if (BP_E1HVN(bp) == 0) {
9233 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9234 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9236 /* no WOL capability for E1HVN != 0 */
9237 bp->flags |= NO_WOL_FLAG;
9239 BNX2X_DEV_INFO("%sWoL capable\n",
9240 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9242 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9243 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9244 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9245 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9247 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9248 val, val2, val3, val4);
9251 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9254 int port = BP_PORT(bp);
9255 u32 ext_phy_type;
9257 switch (switch_cfg) {
9259 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9262 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9263 switch (ext_phy_type) {
9264 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9265 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9268 bp->port.supported |= (SUPPORTED_10baseT_Half |
9269 SUPPORTED_10baseT_Full |
9270 SUPPORTED_100baseT_Half |
9271 SUPPORTED_100baseT_Full |
9272 SUPPORTED_1000baseT_Full |
9273 SUPPORTED_2500baseX_Full |
9278 SUPPORTED_Asym_Pause);
9281 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9282 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9285 bp->port.supported |= (SUPPORTED_10baseT_Half |
9286 SUPPORTED_10baseT_Full |
9287 SUPPORTED_100baseT_Half |
9288 SUPPORTED_100baseT_Full |
9289 SUPPORTED_1000baseT_Full |
9294 SUPPORTED_Asym_Pause);
9298 BNX2X_ERR("NVRAM config error. "
9299 "BAD SerDes ext_phy_config 0x%x\n",
9300 bp->link_params.ext_phy_config);
9304 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9306 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9309 case SWITCH_CFG_10G:
9310 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9312 ext_phy_type =
9313 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9314 switch (ext_phy_type) {
9315 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9316 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9319 bp->port.supported |= (SUPPORTED_10baseT_Half |
9320 SUPPORTED_10baseT_Full |
9321 SUPPORTED_100baseT_Half |
9322 SUPPORTED_100baseT_Full |
9323 SUPPORTED_1000baseT_Full |
9324 SUPPORTED_2500baseX_Full |
9325 SUPPORTED_10000baseT_Full |
9330 SUPPORTED_Asym_Pause);
9333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9334 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9337 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9338 SUPPORTED_1000baseT_Full |
9342 SUPPORTED_Asym_Pause);
9345 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9346 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9349 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9350 SUPPORTED_2500baseX_Full |
9351 SUPPORTED_1000baseT_Full |
9355 SUPPORTED_Asym_Pause);
9358 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9359 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9362 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9365 SUPPORTED_Asym_Pause);
9368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9369 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9372 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9373 SUPPORTED_1000baseT_Full |
9376 SUPPORTED_Asym_Pause);
9379 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9380 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9383 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9384 SUPPORTED_1000baseT_Full |
9388 SUPPORTED_Asym_Pause);
9391 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9392 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9395 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9396 SUPPORTED_1000baseT_Full |
9400 SUPPORTED_Asym_Pause);
9403 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9404 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9407 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9411 SUPPORTED_Asym_Pause);
9414 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9415 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9418 bp->port.supported |= (SUPPORTED_10baseT_Half |
9419 SUPPORTED_10baseT_Full |
9420 SUPPORTED_100baseT_Half |
9421 SUPPORTED_100baseT_Full |
9422 SUPPORTED_1000baseT_Full |
9423 SUPPORTED_10000baseT_Full |
9427 SUPPORTED_Asym_Pause);
9430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9431 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9432 bp->link_params.ext_phy_config);
9436 BNX2X_ERR("NVRAM config error. "
9437 "BAD XGXS ext_phy_config 0x%x\n",
9438 bp->link_params.ext_phy_config);
9442 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9444 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9449 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9450 bp->port.link_config);
9453 bp->link_params.phy_addr = bp->port.phy_addr;
9455 /* mask what we support according to speed_cap_mask */
9456 if (!(bp->link_params.speed_cap_mask &
9457 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9458 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9460 if (!(bp->link_params.speed_cap_mask &
9461 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9462 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9464 if (!(bp->link_params.speed_cap_mask &
9465 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9466 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9468 if (!(bp->link_params.speed_cap_mask &
9469 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9470 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9472 if (!(bp->link_params.speed_cap_mask &
9473 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9474 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9475 SUPPORTED_1000baseT_Full);
9477 if (!(bp->link_params.speed_cap_mask &
9478 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9479 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9481 if (!(bp->link_params.speed_cap_mask &
9482 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9483 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9485 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9488 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9490 bp->link_params.req_duplex = DUPLEX_FULL;
9492 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9493 case PORT_FEATURE_LINK_SPEED_AUTO:
9494 if (bp->port.supported & SUPPORTED_Autoneg) {
9495 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9496 bp->port.advertising = bp->port.supported;
9497 } else {
9498 u32 ext_phy_type =
9499 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9501 if ((ext_phy_type ==
9502 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9504 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9505 /* force 10G, no AN */
9506 bp->link_params.req_line_speed = SPEED_10000;
9507 bp->port.advertising =
9508 (ADVERTISED_10000baseT_Full |
9512 BNX2X_ERR("NVRAM config error. "
9513 "Invalid link_config 0x%x"
9514 " Autoneg not supported\n",
9515 bp->port.link_config);
9520 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9521 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9522 bp->link_params.req_line_speed = SPEED_10;
9523 bp->port.advertising = (ADVERTISED_10baseT_Full |
9526 BNX2X_ERROR("NVRAM config error. "
9527 "Invalid link_config 0x%x"
9528 " speed_cap_mask 0x%x\n",
9529 bp->port.link_config,
9530 bp->link_params.speed_cap_mask);
9535 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9536 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9537 bp->link_params.req_line_speed = SPEED_10;
9538 bp->link_params.req_duplex = DUPLEX_HALF;
9539 bp->port.advertising = (ADVERTISED_10baseT_Half |
9542 BNX2X_ERROR("NVRAM config error. "
9543 "Invalid link_config 0x%x"
9544 " speed_cap_mask 0x%x\n",
9545 bp->port.link_config,
9546 bp->link_params.speed_cap_mask);
9551 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9552 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9553 bp->link_params.req_line_speed = SPEED_100;
9554 bp->port.advertising = (ADVERTISED_100baseT_Full |
9557 BNX2X_ERROR("NVRAM config error. "
9558 "Invalid link_config 0x%x"
9559 " speed_cap_mask 0x%x\n",
9560 bp->port.link_config,
9561 bp->link_params.speed_cap_mask);
9566 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9567 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9568 bp->link_params.req_line_speed = SPEED_100;
9569 bp->link_params.req_duplex = DUPLEX_HALF;
9570 bp->port.advertising = (ADVERTISED_100baseT_Half |
9573 BNX2X_ERROR("NVRAM config error. "
9574 "Invalid link_config 0x%x"
9575 " speed_cap_mask 0x%x\n",
9576 bp->port.link_config,
9577 bp->link_params.speed_cap_mask);
9582 case PORT_FEATURE_LINK_SPEED_1G:
9583 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9584 bp->link_params.req_line_speed = SPEED_1000;
9585 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9588 BNX2X_ERROR("NVRAM config error. "
9589 "Invalid link_config 0x%x"
9590 " speed_cap_mask 0x%x\n",
9591 bp->port.link_config,
9592 bp->link_params.speed_cap_mask);
9597 case PORT_FEATURE_LINK_SPEED_2_5G:
9598 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9599 bp->link_params.req_line_speed = SPEED_2500;
9600 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9603 BNX2X_ERROR("NVRAM config error. "
9604 "Invalid link_config 0x%x"
9605 " speed_cap_mask 0x%x\n",
9606 bp->port.link_config,
9607 bp->link_params.speed_cap_mask);
9612 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9613 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9614 case PORT_FEATURE_LINK_SPEED_10G_KR:
9615 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9616 bp->link_params.req_line_speed = SPEED_10000;
9617 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9620 BNX2X_ERROR("NVRAM config error. "
9621 "Invalid link_config 0x%x"
9622 " speed_cap_mask 0x%x\n",
9623 bp->port.link_config,
9624 bp->link_params.speed_cap_mask);
9630 BNX2X_ERROR("NVRAM config error. "
9631 "BAD link speed link_config 0x%x\n",
9632 bp->port.link_config);
9633 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9634 bp->port.advertising = bp->port.supported;
9638 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9639 PORT_FEATURE_FLOW_CONTROL_MASK);
9640 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9641 !(bp->port.supported & SUPPORTED_Autoneg))
9642 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9644 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
9645 " advertising 0x%x\n",
9646 bp->link_params.req_line_speed,
9647 bp->link_params.req_duplex,
9648 bp->link_params.req_flow_ctrl, bp->port.advertising);
9651 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9653 mac_hi = cpu_to_be16(mac_hi);
9654 mac_lo = cpu_to_be32(mac_lo);
9655 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9656 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
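/* Example: mac_hi = 0x0010 and mac_lo = 0x18a1b2c3 (hypothetical values)
 * produce the address 00:10:18:a1:b2:c3 in mac_buf, since both halves
 * are stored big-endian. */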
9659 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9661 int port = BP_PORT(bp);
9667 bp->link_params.bp = bp;
9668 bp->link_params.port = port;
9670 bp->link_params.lane_config =
9671 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9672 bp->link_params.ext_phy_config =
9674 dev_info.port_hw_config[port].external_phy_config);
9675 /* BCM8727_NOC => BCM8727 with no over-current sensing */
9676 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9677 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9678 bp->link_params.ext_phy_config &=
9679 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9680 bp->link_params.ext_phy_config |=
9681 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9682 bp->link_params.feature_config_flags |=
9683 FEATURE_CONFIG_BCM8727_NOC;
9686 bp->link_params.speed_cap_mask =
9688 dev_info.port_hw_config[port].speed_capability_mask);
9690 bp->port.link_config =
9691 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9693 /* Get the rx and tx xgxs config for all 4 lanes */
9694 for (i = 0; i < 2; i++) {
9696 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9697 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9698 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9701 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9702 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9703 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9706 /* If the device is capable of WoL, set the default state according
9707 * to the HW config */
9709 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9710 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9711 (config & PORT_FEATURE_WOL_ENABLED));
9713 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9714 " speed_cap_mask 0x%08x link_config 0x%08x\n",
9715 bp->link_params.lane_config,
9716 bp->link_params.ext_phy_config,
9717 bp->link_params.speed_cap_mask, bp->port.link_config);
9719 bp->link_params.switch_cfg |= (bp->port.link_config &
9720 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9721 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9723 bnx2x_link_settings_requested(bp);
9726 * If connected directly, work with the internal PHY; otherwise work
9727 * with the external PHY
9729 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9730 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9731 bp->mdio.prtad = bp->link_params.phy_addr;
9733 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9734 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9736 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9738 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9739 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9740 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9741 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9742 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9745 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9746 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9747 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9751 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9753 int func = BP_FUNC(bp);
9757 bnx2x_get_common_hwinfo(bp);
9761 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9763 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9765 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9766 FUNC_MF_CFG_E1HOV_TAG_MASK);
9767 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9769 BNX2X_DEV_INFO("%s function mode\n",
9770 IS_E1HMF(bp) ? "multi" : "single");
9773 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9774 e1hov_tag) &
9775 FUNC_MF_CFG_E1HOV_TAG_MASK);
9776 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9777 bp->e1hov = val;
9778 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9779 "(0x%04x)\n",
9780 func, bp->e1hov, bp->e1hov);
9781 } else {
9782 BNX2X_ERROR("No valid E1HOV for func %d,"
9783 " aborting\n", func);
9788 BNX2X_ERROR("VN %d in single function mode,"
9789 " aborting\n", BP_E1HVN(bp));
9795 if (!BP_NOMCP(bp)) {
9796 bnx2x_get_port_hwinfo(bp);
9798 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9799 DRV_MSG_SEQ_NUMBER_MASK);
9800 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9804 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9805 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9806 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9807 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9808 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9809 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9810 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9811 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9812 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9813 bp->dev->dev_addr[5] = (u8)(val & 0xff);
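/* Same layout as bnx2x_set_mac_buf(): e.g. val2 = 0x0010 and
 * val = 0x18a1b2c3 (hypothetical values) give dev_addr 00:10:18:a1:b2:c3. */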
9814 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9816 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9824 /* only supposed to happen on emulation/FPGA */
9825 BNX2X_ERROR("warning: random MAC workaround active\n");
9826 random_ether_addr(bp->dev->dev_addr);
9827 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
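/* bnx2x_read_fwinfo() below parses the PCI Vital Product Data: it locates
 * the read-only (LRDT) section, matches the MFR_ID keyword against the
 * Dell vendor id and, on a match, copies the vendor-specific VENDOR0
 * string into bp->fw_ver. */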
9833 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9835 int cnt, i, block_end, rodi;
9836 char vpd_data[BNX2X_VPD_LEN+1];
9837 char str_id_reg[VENDOR_ID_LEN+1];
9838 char str_id_cap[VENDOR_ID_LEN+1];
9841 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9842 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9844 if (cnt < BNX2X_VPD_LEN)
9847 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9848 PCI_VPD_LRDT_RO_DATA);
9853 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9854 pci_vpd_lrdt_size(&vpd_data[i]);
9856 i += PCI_VPD_LRDT_TAG_SIZE;
9858 if (block_end > BNX2X_VPD_LEN)
9861 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9862 PCI_VPD_RO_KEYWORD_MFR_ID);
9866 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9868 if (len != VENDOR_ID_LEN)
9871 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9873 /* vendor specific info */
9874 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9875 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9876 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9877 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9879 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9880 PCI_VPD_RO_KEYWORD_VENDOR0);
9882 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9884 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9886 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9887 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9888 bp->fw_ver[len] = ' ';
9897 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9899 int func = BP_FUNC(bp);
9903 /* Disable interrupt handling until HW is initialized */
9904 atomic_set(&bp->intr_sem, 1);
9905 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9907 mutex_init(&bp->port.phy_mutex);
9908 mutex_init(&bp->fw_mb_mutex);
9910 mutex_init(&bp->cnic_mutex);
9913 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9914 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9916 rc = bnx2x_get_hwinfo(bp);
9918 bnx2x_read_fwinfo(bp);
9919 /* need to reset chip if undi was active */
9921 bnx2x_undi_unload(bp);
9923 if (CHIP_REV_IS_FPGA(bp))
9924 dev_err(&bp->pdev->dev, "FPGA detected\n");
9926 if (BP_NOMCP(bp) && (func == 0))
9927 dev_err(&bp->pdev->dev, "MCP disabled, "
9928 "must load devices in order!\n");
9930 /* Set multi queue mode */
9931 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9932 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9933 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9934 "requested is not MSI-X\n");
9935 multi_mode = ETH_RSS_MODE_DISABLED;
9937 bp->multi_mode = multi_mode;
9940 bp->dev->features |= NETIF_F_GRO;
9943 if (disable_tpa) {
9944 bp->flags &= ~TPA_ENABLE_FLAG;
9945 bp->dev->features &= ~NETIF_F_LRO;
9946 } else {
9947 bp->flags |= TPA_ENABLE_FLAG;
9948 bp->dev->features |= NETIF_F_LRO;
9949 }
9951 if (CHIP_IS_E1(bp))
9952 bp->dropless_fc = 0;
9953 else
9954 bp->dropless_fc = dropless_fc;
9958 bp->tx_ring_size = MAX_TX_AVAIL;
9959 bp->rx_ring_size = MAX_RX_AVAIL;
9963 /* make sure that the numbers are in the right granularity */
9964 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9965 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
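/* e.g. if BNX2X_BTR were 4 (illustrative value only), tx_ticks would be
 * (50/16)*16 = 48 usec and rx_ticks (25/16)*16 = 16 usec. */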
9967 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9968 bp->current_interval = (poll ? poll : timer_interval);
9970 init_timer(&bp->timer);
9971 bp->timer.expires = jiffies + bp->current_interval;
9972 bp->timer.data = (unsigned long) bp;
9973 bp->timer.function = bnx2x_timer;
9979 * ethtool service functions
9982 /* All ethtool functions called with rtnl_lock */
9984 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9986 struct bnx2x *bp = netdev_priv(dev);
9988 cmd->supported = bp->port.supported;
9989 cmd->advertising = bp->port.advertising;
9991 if ((bp->state == BNX2X_STATE_OPEN) &&
9992 !(bp->flags & MF_FUNC_DIS) &&
9993 (bp->link_vars.link_up)) {
9994 cmd->speed = bp->link_vars.line_speed;
9995 cmd->duplex = bp->link_vars.duplex;
10000 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10001 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10002 if (vn_max_rate < cmd->speed)
10003 cmd->speed = vn_max_rate;
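/* e.g. a MAX_BW field of 50 (hypothetical) would cap the reported speed
 * at 50 * 100 = 5000 Mbps for this VN. */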
10010 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10012 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10014 switch (ext_phy_type) {
10015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10022 cmd->port = PORT_FIBRE;
10025 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10026 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10027 cmd->port = PORT_TP;
10030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10031 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10032 bp->link_params.ext_phy_config);
10036 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10037 bp->link_params.ext_phy_config);
10041 cmd->port = PORT_TP;
10043 cmd->phy_address = bp->mdio.prtad;
10044 cmd->transceiver = XCVR_INTERNAL;
10046 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10047 cmd->autoneg = AUTONEG_ENABLE;
10049 cmd->autoneg = AUTONEG_DISABLE;
10054 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10055 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10056 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10057 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10058 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10059 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10060 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10065 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10067 struct bnx2x *bp = netdev_priv(dev);
10073 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10074 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10075 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10076 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10077 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10078 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10079 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10081 if (cmd->autoneg == AUTONEG_ENABLE) {
10082 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10083 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10087 /* advertise the requested speed and duplex if supported */
10088 cmd->advertising &= bp->port.supported;
10090 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10091 bp->link_params.req_duplex = DUPLEX_FULL;
10092 bp->port.advertising |= (ADVERTISED_Autoneg |
10095 } else { /* forced speed */
10096 /* advertise the requested speed and duplex if supported */
10097 switch (cmd->speed) {
10098 case SPEED_10:
10099 if (cmd->duplex == DUPLEX_FULL) {
10100 if (!(bp->port.supported &
10101 SUPPORTED_10baseT_Full)) {
10103 "10M full not supported\n");
10107 advertising = (ADVERTISED_10baseT_Full |
10110 if (!(bp->port.supported &
10111 SUPPORTED_10baseT_Half)) {
10113 "10M half not supported\n");
10117 advertising = (ADVERTISED_10baseT_Half |
10122 case SPEED_100:
10123 if (cmd->duplex == DUPLEX_FULL) {
10124 if (!(bp->port.supported &
10125 SUPPORTED_100baseT_Full)) {
10127 "100M full not supported\n");
10131 advertising = (ADVERTISED_100baseT_Full |
10134 if (!(bp->port.supported &
10135 SUPPORTED_100baseT_Half)) {
10137 "100M half not supported\n");
10141 advertising = (ADVERTISED_100baseT_Half |
10146 case SPEED_1000:
10147 if (cmd->duplex != DUPLEX_FULL) {
10148 DP(NETIF_MSG_LINK, "1G half not supported\n");
10152 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10153 DP(NETIF_MSG_LINK, "1G full not supported\n");
10157 advertising = (ADVERTISED_1000baseT_Full |
10161 case SPEED_2500:
10162 if (cmd->duplex != DUPLEX_FULL) {
10164 "2.5G half not supported\n");
10168 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10170 "2.5G full not supported\n");
10174 advertising = (ADVERTISED_2500baseX_Full |
10178 case SPEED_10000:
10179 if (cmd->duplex != DUPLEX_FULL) {
10180 DP(NETIF_MSG_LINK, "10G half not supported\n");
10184 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10185 DP(NETIF_MSG_LINK, "10G full not supported\n");
10189 advertising = (ADVERTISED_10000baseT_Full |
10194 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10198 bp->link_params.req_line_speed = cmd->speed;
10199 bp->link_params.req_duplex = cmd->duplex;
10200 bp->port.advertising = advertising;
10203 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10204 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10205 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10206 bp->port.advertising);
10208 if (netif_running(dev)) {
10209 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10210 bnx2x_link_set(bp);
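/* Example (hypothetical interface name): "ethtool -s eth0 speed 10000
 * duplex full autoneg off" lands in the SPEED_10000 branch above and
 * then triggers bnx2x_link_set() here. */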
10216 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10217 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10219 static int bnx2x_get_regs_len(struct net_device *dev)
10221 struct bnx2x *bp = netdev_priv(dev);
10222 int regdump_len = 0;
10225 if (CHIP_IS_E1(bp)) {
10226 for (i = 0; i < REGS_COUNT; i++)
10227 if (IS_E1_ONLINE(reg_addrs[i].info))
10228 regdump_len += reg_addrs[i].size;
10230 for (i = 0; i < WREGS_COUNT_E1; i++)
10231 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10232 regdump_len += wreg_addrs_e1[i].size *
10233 (1 + wreg_addrs_e1[i].read_regs_count);
10236 for (i = 0; i < REGS_COUNT; i++)
10237 if (IS_E1H_ONLINE(reg_addrs[i].info))
10238 regdump_len += reg_addrs[i].size;
10240 for (i = 0; i < WREGS_COUNT_E1H; i++)
10241 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10242 regdump_len += wreg_addrs_e1h[i].size *
10243 (1 + wreg_addrs_e1h[i].read_regs_count);
10246 regdump_len += sizeof(struct dump_hdr);
10248 return regdump_len;
10251 static void bnx2x_get_regs(struct net_device *dev,
10252 struct ethtool_regs *regs, void *_p)
10254 u32 *p = _p, i, j;
10255 struct bnx2x *bp = netdev_priv(dev);
10256 struct dump_hdr dump_hdr = {0};
10259 memset(p, 0, regs->len);
10261 if (!netif_running(bp->dev))
10264 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10265 dump_hdr.dump_sign = dump_sign_all;
10266 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10267 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10268 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10269 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10270 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10272 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10273 p += dump_hdr.hdr_size + 1;
10275 if (CHIP_IS_E1(bp)) {
10276 for (i = 0; i < REGS_COUNT; i++)
10277 if (IS_E1_ONLINE(reg_addrs[i].info))
10278 for (j = 0; j < reg_addrs[i].size; j++)
10279 *p++ = REG_RD(bp,
10280 reg_addrs[i].addr + j*4);
10283 for (i = 0; i < REGS_COUNT; i++)
10284 if (IS_E1H_ONLINE(reg_addrs[i].info))
10285 for (j = 0; j < reg_addrs[i].size; j++)
10286 *p++ = REG_RD(bp,
10287 reg_addrs[i].addr + j*4);
10291 #define PHY_FW_VER_LEN 10
10293 static void bnx2x_get_drvinfo(struct net_device *dev,
10294 struct ethtool_drvinfo *info)
10296 struct bnx2x *bp = netdev_priv(dev);
10297 u8 phy_fw_ver[PHY_FW_VER_LEN];
10299 strcpy(info->driver, DRV_MODULE_NAME);
10300 strcpy(info->version, DRV_MODULE_VERSION);
10302 phy_fw_ver[0] = '\0';
10303 if (bp->port.pmf) {
10304 bnx2x_acquire_phy_lock(bp);
10305 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10306 (bp->state != BNX2X_STATE_CLOSED),
10307 phy_fw_ver, PHY_FW_VER_LEN);
10308 bnx2x_release_phy_lock(bp);
10311 strncpy(info->fw_version, bp->fw_ver, 32);
10312 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10313 "bc %d.%d.%d%s%s",
10314 (bp->common.bc_ver & 0xff0000) >> 16,
10315 (bp->common.bc_ver & 0xff00) >> 8,
10316 (bp->common.bc_ver & 0xff),
10317 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10318 strcpy(info->bus_info, pci_name(bp->pdev));
10319 info->n_stats = BNX2X_NUM_STATS;
10320 info->testinfo_len = BNX2X_NUM_TESTS;
10321 info->eedump_len = bp->common.flash_size;
10322 info->regdump_len = bnx2x_get_regs_len(dev);
10325 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10327 struct bnx2x *bp = netdev_priv(dev);
10329 if (bp->flags & NO_WOL_FLAG) {
10330 wol->supported = 0;
10331 wol->wolopts = 0;
10332 } else {
10333 wol->supported = WAKE_MAGIC;
10334 if (bp->wol)
10335 wol->wolopts = WAKE_MAGIC;
10336 else
10337 wol->wolopts = 0;
10338 }
10339 memset(&wol->sopass, 0, sizeof(wol->sopass));
10342 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10344 struct bnx2x *bp = netdev_priv(dev);
10346 if (wol->wolopts & ~WAKE_MAGIC)
10347 return -EINVAL;
10349 if (wol->wolopts & WAKE_MAGIC) {
10350 if (bp->flags & NO_WOL_FLAG)
10351 return -EINVAL;
10353 bp->wol = 1;
10354 } else
10355 bp->wol = 0;
10357 return 0;
10358 }
10360 static u32 bnx2x_get_msglevel(struct net_device *dev)
10362 struct bnx2x *bp = netdev_priv(dev);
10364 return bp->msg_enable;
10367 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10369 struct bnx2x *bp = netdev_priv(dev);
10371 if (capable(CAP_NET_ADMIN))
10372 bp->msg_enable = level;
10375 static int bnx2x_nway_reset(struct net_device *dev)
10377 struct bnx2x *bp = netdev_priv(dev);
10382 if (netif_running(dev)) {
10383 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10384 bnx2x_link_set(bp);
10390 static u32 bnx2x_get_link(struct net_device *dev)
10392 struct bnx2x *bp = netdev_priv(dev);
10394 if (bp->flags & MF_FUNC_DIS)
10395 return 0;
10397 return bp->link_vars.link_up;
10400 static int bnx2x_get_eeprom_len(struct net_device *dev)
10402 struct bnx2x *bp = netdev_priv(dev);
10404 return bp->common.flash_size;
10407 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10409 int port = BP_PORT(bp);
10413 /* adjust timeout for emulation/FPGA */
10414 count = NVRAM_TIMEOUT_COUNT;
10415 if (CHIP_REV_IS_SLOW(bp))
10416 count *= 100;
10418 /* request access to nvram interface */
10419 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10420 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10422 for (i = 0; i < count*10; i++) {
10423 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10424 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10425 break;
10427 udelay(5);
10428 }
10430 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10431 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10438 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10440 int port = BP_PORT(bp);
10444 /* adjust timeout for emulation/FPGA */
10445 count = NVRAM_TIMEOUT_COUNT;
10446 if (CHIP_REV_IS_SLOW(bp))
10447 count *= 100;
10449 /* relinquish nvram interface */
10450 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10451 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10453 for (i = 0; i < count*10; i++) {
10454 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10455 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10456 break;
10458 udelay(5);
10459 }
10461 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10462 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10469 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10473 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10475 /* enable both bits, even on read */
10476 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10477 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10478 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10481 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10485 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10487 /* disable both bits, even after read */
10488 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10489 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10490 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10493 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10499 /* build the command word */
10500 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10502 /* need to clear DONE bit separately */
10503 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10505 /* address of the NVRAM to read from */
10506 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10507 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10509 /* issue a read command */
10510 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10512 /* adjust timeout for emulation/FPGA */
10513 count = NVRAM_TIMEOUT_COUNT;
10514 if (CHIP_REV_IS_SLOW(bp))
10515 count *= 100;
10517 /* wait for completion */
10520 for (i = 0; i < count; i++) {
10521 udelay(5);
10522 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10524 if (val & MCPR_NVM_COMMAND_DONE) {
10525 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10526 /* we read nvram data in cpu order,
10527 * but ethtool sees it as an array of bytes;
10528 * converting to big-endian will do the work */
10529 *ret_val = cpu_to_be32(val);
10538 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10545 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10547 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10552 if (offset + buf_size > bp->common.flash_size) {
10553 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10554 " buf_size (0x%x) > flash_size (0x%x)\n",
10555 offset, buf_size, bp->common.flash_size);
10559 /* request access to nvram interface */
10560 rc = bnx2x_acquire_nvram_lock(bp);
10564 /* enable access to nvram interface */
10565 bnx2x_enable_nvram_access(bp);
10567 /* read the first word(s) */
10568 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10569 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10570 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10571 memcpy(ret_buf, &val, 4);
10573 /* advance to the next dword */
10574 offset += sizeof(u32);
10575 ret_buf += sizeof(u32);
10576 buf_size -= sizeof(u32);
10577 cmd_flags = 0;
10578 }
10580 if (rc == 0) {
10581 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10582 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10583 memcpy(ret_buf, &val, 4);
10584 }
10586 /* disable access to nvram interface */
10587 bnx2x_disable_nvram_access(bp);
10588 bnx2x_release_nvram_lock(bp);
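/* Usage sketch (assuming a valid, running device):
 *
 *	u8 buf[16];
 *	if (bnx2x_nvram_read(bp, 0, buf, sizeof(buf)) == 0)
 *		buf now holds NVRAM bytes 0..15 in big-endian order
 *
 * offset and length must both be dword-aligned (checked above). */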
10593 static int bnx2x_get_eeprom(struct net_device *dev,
10594 struct ethtool_eeprom *eeprom, u8 *eebuf)
10596 struct bnx2x *bp = netdev_priv(dev);
10599 if (!netif_running(dev))
10602 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10603 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10604 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10605 eeprom->len, eeprom->len);
10607 /* parameters already validated in ethtool_get_eeprom */
10609 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10614 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10619 /* build the command word */
10620 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10622 /* need to clear DONE bit separately */
10623 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10625 /* write the data */
10626 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10628 /* address of the NVRAM to write to */
10629 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10630 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10632 /* issue the write command */
10633 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10635 /* adjust timeout for emulation/FPGA */
10636 count = NVRAM_TIMEOUT_COUNT;
10637 if (CHIP_REV_IS_SLOW(bp))
10638 count *= 100;
10640 /* wait for completion */
10642 for (i = 0; i < count; i++) {
10643 udelay(5);
10644 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10645 if (val & MCPR_NVM_COMMAND_DONE) {
10654 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
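/* e.g. BYTE_OFFSET(0x102) == 16: byte 2 of its dword starts at bit 16.
 * bnx2x_nvram_write1() below uses this for a read-modify-write of a
 * single byte within its aligned dword. */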
10656 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10664 if (offset + buf_size > bp->common.flash_size) {
10665 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10666 " buf_size (0x%x) > flash_size (0x%x)\n",
10667 offset, buf_size, bp->common.flash_size);
10671 /* request access to nvram interface */
10672 rc = bnx2x_acquire_nvram_lock(bp);
10676 /* enable access to nvram interface */
10677 bnx2x_enable_nvram_access(bp);
10679 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10680 align_offset = (offset & ~0x03);
10681 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10684 val &= ~(0xff << BYTE_OFFSET(offset));
10685 val |= (*data_buf << BYTE_OFFSET(offset));
10687 /* nvram data is returned as an array of bytes;
10688 * convert it back to cpu order */
10689 val = be32_to_cpu(val);
10691 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10695 /* disable access to nvram interface */
10696 bnx2x_disable_nvram_access(bp);
10697 bnx2x_release_nvram_lock(bp);
10702 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10708 u32 written_so_far;
10710 if (buf_size == 1) /* ethtool */
10711 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10713 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10715 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10720 if (offset + buf_size > bp->common.flash_size) {
10721 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10722 " buf_size (0x%x) > flash_size (0x%x)\n",
10723 offset, buf_size, bp->common.flash_size);
10727 /* request access to nvram interface */
10728 rc = bnx2x_acquire_nvram_lock(bp);
10732 /* enable access to nvram interface */
10733 bnx2x_enable_nvram_access(bp);
10735 written_so_far = 0;
10736 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10737 while ((written_so_far < buf_size) && (rc == 0)) {
10738 if (written_so_far == (buf_size - sizeof(u32)))
10739 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10740 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10741 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10742 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10743 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10745 memcpy(&val, data_buf, 4);
10747 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10749 /* advance to the next dword */
10750 offset += sizeof(u32);
10751 data_buf += sizeof(u32);
10752 written_so_far += sizeof(u32);
10756 /* disable access to nvram interface */
10757 bnx2x_disable_nvram_access(bp);
10758 bnx2x_release_nvram_lock(bp);
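/* Note on the FIRST/LAST flags in the loop above: a write burst may not
 * cross an NVRAM page, so LAST is raised on the final dword of the buffer
 * or of a page, and FIRST is raised again on the first dword of the next
 * page. */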
10763 static int bnx2x_set_eeprom(struct net_device *dev,
10764 struct ethtool_eeprom *eeprom, u8 *eebuf)
10766 struct bnx2x *bp = netdev_priv(dev);
10767 int port = BP_PORT(bp);
10770 if (!netif_running(dev))
10773 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10774 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10775 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10776 eeprom->len, eeprom->len);
10778 /* parameters already validated in ethtool_set_eeprom */
10780 /* PHY eeprom can be accessed only by the PMF */
10781 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10785 if (eeprom->magic == 0x50485950) {
10786 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10787 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10789 bnx2x_acquire_phy_lock(bp);
10790 rc |= bnx2x_link_reset(&bp->link_params,
10791 &bp->link_vars, 0);
10792 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10793 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10794 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10795 MISC_REGISTERS_GPIO_HIGH, port);
10796 bnx2x_release_phy_lock(bp);
10797 bnx2x_link_report(bp);
10799 } else if (eeprom->magic == 0x50485952) {
10800 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10801 if (bp->state == BNX2X_STATE_OPEN) {
10802 bnx2x_acquire_phy_lock(bp);
10803 rc |= bnx2x_link_reset(&bp->link_params,
10804 &bp->link_vars, 1);
10806 rc |= bnx2x_phy_init(&bp->link_params,
10808 bnx2x_release_phy_lock(bp);
10809 bnx2x_calc_fc_adv(bp);
10811 } else if (eeprom->magic == 0x53985943) {
10812 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10813 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10814 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10816 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10818 /* DSP Remove Download Mode */
10819 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10820 MISC_REGISTERS_GPIO_LOW, port);
10822 bnx2x_acquire_phy_lock(bp);
10824 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10826 /* wait 0.5 sec to allow it to run */
10828 bnx2x_ext_phy_hw_reset(bp, port);
10830 bnx2x_release_phy_lock(bp);
10833 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10838 static int bnx2x_get_coalesce(struct net_device *dev,
10839 struct ethtool_coalesce *coal)
10841 struct bnx2x *bp = netdev_priv(dev);
10843 memset(coal, 0, sizeof(struct ethtool_coalesce));
10845 coal->rx_coalesce_usecs = bp->rx_ticks;
10846 coal->tx_coalesce_usecs = bp->tx_ticks;
10851 static int bnx2x_set_coalesce(struct net_device *dev,
10852 struct ethtool_coalesce *coal)
10854 struct bnx2x *bp = netdev_priv(dev);
10856 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10857 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10858 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10860 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10861 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10862 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10864 if (netif_running(dev))
10865 bnx2x_update_coalesce(bp);
10870 static void bnx2x_get_ringparam(struct net_device *dev,
10871 struct ethtool_ringparam *ering)
10873 struct bnx2x *bp = netdev_priv(dev);
10875 ering->rx_max_pending = MAX_RX_AVAIL;
10876 ering->rx_mini_max_pending = 0;
10877 ering->rx_jumbo_max_pending = 0;
10879 ering->rx_pending = bp->rx_ring_size;
10880 ering->rx_mini_pending = 0;
10881 ering->rx_jumbo_pending = 0;
10883 ering->tx_max_pending = MAX_TX_AVAIL;
10884 ering->tx_pending = bp->tx_ring_size;
10887 static int bnx2x_set_ringparam(struct net_device *dev,
10888 struct ethtool_ringparam *ering)
10890 struct bnx2x *bp = netdev_priv(dev);
10893 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10894 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10898 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10899 (ering->tx_pending > MAX_TX_AVAIL) ||
10900 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10903 bp->rx_ring_size = ering->rx_pending;
10904 bp->tx_ring_size = ering->tx_pending;
10906 if (netif_running(dev)) {
10907 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10908 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10914 static void bnx2x_get_pauseparam(struct net_device *dev,
10915 struct ethtool_pauseparam *epause)
10917 struct bnx2x *bp = netdev_priv(dev);
10919 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10920 BNX2X_FLOW_CTRL_AUTO) &&
10921 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10923 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10924 BNX2X_FLOW_CTRL_RX);
10925 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10926 BNX2X_FLOW_CTRL_TX);
10928 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10929 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10930 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10933 static int bnx2x_set_pauseparam(struct net_device *dev,
10934 struct ethtool_pauseparam *epause)
10936 struct bnx2x *bp = netdev_priv(dev);
10941 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10942 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10943 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10945 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10947 if (epause->rx_pause)
10948 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10950 if (epause->tx_pause)
10951 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10953 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10954 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10956 if (epause->autoneg) {
10957 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10958 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10962 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10963 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10967 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10969 if (netif_running(dev)) {
10970 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10971 bnx2x_link_set(bp);
10977 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10979 struct bnx2x *bp = netdev_priv(dev);
10983 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10984 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10988 /* TPA requires Rx CSUM offloading */
10989 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10990 if (!disable_tpa) {
10991 if (!(dev->features & NETIF_F_LRO)) {
10992 dev->features |= NETIF_F_LRO;
10993 bp->flags |= TPA_ENABLE_FLAG;
10998 } else if (dev->features & NETIF_F_LRO) {
10999 dev->features &= ~NETIF_F_LRO;
11000 bp->flags &= ~TPA_ENABLE_FLAG;
11004 if (changed && netif_running(dev)) {
11005 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11006 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11012 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11014 struct bnx2x *bp = netdev_priv(dev);
11016 return bp->rx_csum;
11019 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11021 struct bnx2x *bp = netdev_priv(dev);
11024 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11025 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11029 bp->rx_csum = data;
11031 /* Disable TPA when Rx CSUM is disabled; otherwise all
11032 TPA'ed packets will be discarded due to a wrong TCP CSUM */
11034 u32 flags = ethtool_op_get_flags(dev);
11036 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11042 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11045 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11046 dev->features |= NETIF_F_TSO6;
11048 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11049 dev->features &= ~NETIF_F_TSO6;
11055 static const struct {
11056 char string[ETH_GSTRING_LEN];
11057 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11058 { "register_test (offline)" },
11059 { "memory_test (offline)" },
11060 { "loopback_test (offline)" },
11061 { "nvram_test (online)" },
11062 { "interrupt_test (online)" },
11063 { "link_test (online)" },
11064 { "idle check (online)" }
11067 static int bnx2x_test_registers(struct bnx2x *bp)
11069 int idx, i, rc = -ENODEV;
11071 int port = BP_PORT(bp);
11072 static const struct {
11077 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11078 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11079 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11080 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11081 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11082 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11083 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11084 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11085 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11086 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11087 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11088 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11089 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11090 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11091 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11092 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11093 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11094 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
11095 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
11096 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11097 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
11098 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11099 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11100 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11101 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11102 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11103 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11104 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11105 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
11106 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11107 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
11108 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11109 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11110 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11111 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11112 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11113 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11115 { 0xffffffff, 0, 0x00000000 }
11118 if (!netif_running(bp->dev))
11121 /* Run the test twice:
11122 first writing 0x00000000, then writing 0xffffffff */
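/* The per-register mask restricts the comparison below to writable bits;
 * read-only and reserved bits would otherwise fail the write/read-back
 * check even on healthy hardware.
 */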
11123 for (idx = 0; idx < 2; idx++) {
11130 wr_val = 0xffffffff;
11134 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11135 u32 offset, mask, save_val, val;
11137 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11138 mask = reg_tbl[i].mask;
11140 save_val = REG_RD(bp, offset);
11142 REG_WR(bp, offset, (wr_val & mask));
11143 val = REG_RD(bp, offset);
11145 /* Restore the original register's value */
11146 REG_WR(bp, offset, save_val);
11148 /* verify value is as expected */
11149 if ((val & mask) != (wr_val & mask)) {
11150 DP(NETIF_MSG_PROBE,
11151 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11152 offset, val, wr_val, mask);
11153 goto test_reg_exit;
11164 static int bnx2x_test_memory(struct bnx2x *bp)
11166 int i, j, rc = -ENODEV;
11168 static const struct {
11172 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11173 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11174 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11175 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11176 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11177 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11178 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11182 static const struct {
11188 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11189 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11190 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11191 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11192 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11193 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11195 { NULL, 0xffffffff, 0, 0 }
11198 if (!netif_running(bp->dev))
11201 /* Go through all the memories */
11202 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11203 for (j = 0; j < mem_tbl[i].size; j++)
11204 REG_RD(bp, mem_tbl[i].offset + j*4);
11206 /* Check the parity status */
11207 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11208 val = REG_RD(bp, prty_tbl[i].offset);
11209 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11210 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11212 "%s is 0x%x\n", prty_tbl[i].name, val);
11213 goto test_mem_exit;
11223 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11228 while (bnx2x_link_test(bp) && cnt--)
11232 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11234 unsigned int pkt_size, num_pkts, i;
11235 struct sk_buff *skb;
11236 unsigned char *packet;
11237 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11238 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11239 u16 tx_start_idx, tx_idx;
11240 u16 rx_start_idx, rx_idx;
11241 u16 pkt_prod, bd_prod;
11242 struct sw_tx_bd *tx_buf;
11243 struct eth_tx_start_bd *tx_start_bd;
11244 struct eth_tx_parse_bd *pbd = NULL;
11245 dma_addr_t mapping;
11246 union eth_rx_cqe *cqe;
11248 struct sw_rx_bd *rx_buf;
11252 /* check the loopback mode */
11253 switch (loopback_mode) {
11254 case BNX2X_PHY_LOOPBACK:
11255 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11258 case BNX2X_MAC_LOOPBACK:
11259 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11260 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11266 /* prepare the loopback packet */
11267 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11268 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11269 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11272 goto test_loopback_exit;
11274 packet = skb_put(skb, pkt_size);
11275 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11276 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11277 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11278 for (i = ETH_HLEN; i < pkt_size; i++)
11279 packet[i] = (unsigned char) (i & 0xff);
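/* Resulting frame: DA = our own MAC (so the receive side will accept it),
 * SA = all zeros, the remaining header bytes (the EtherType) filled with
 * 0x77, and an incrementing byte pattern as payload, which is verified
 * against the received data below.
 */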
11281 /* send the loopback packet */
11283 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11284 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11286 pkt_prod = fp_tx->tx_pkt_prod++;
11287 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11288 tx_buf->first_bd = fp_tx->tx_bd_prod;
11292 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11293 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11294 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11295 skb_headlen(skb), DMA_TO_DEVICE);
11296 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11297 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11298 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11299 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11300 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11301 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11302 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11303 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11305 /* turn on parsing and get a BD */
11306 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11307 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11309 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11313 fp_tx->tx_db.data.prod += 2;
11315 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11320 fp_tx->tx_bd_prod += 2; /* start + pbd */
11324 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11325 if (tx_idx != tx_start_idx + num_pkts)
11326 goto test_loopback_exit;
11328 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11329 if (rx_idx != rx_start_idx + num_pkts)
11330 goto test_loopback_exit;
11332 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11333 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11334 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11335 goto test_loopback_rx_exit;
11337 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11338 if (len != pkt_size)
11339 goto test_loopback_rx_exit;
11341 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11343 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11344 for (i = ETH_HLEN; i < pkt_size; i++)
11345 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11346 goto test_loopback_rx_exit;
11350 test_loopback_rx_exit:
11352 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11353 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11354 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11355 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11357 /* Update producers */
11358 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11359 fp_rx->rx_sge_prod);
11361 test_loopback_exit:
11362 bp->link_params.loopback_mode = LOOPBACK_NONE;
11367 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11374 if (!netif_running(bp->dev))
11375 return BNX2X_LOOPBACK_FAILED;
11377 bnx2x_netif_stop(bp, 1);
11378 bnx2x_acquire_phy_lock(bp);
11380 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11382 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11383 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11386 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11388 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11389 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11392 bnx2x_release_phy_lock(bp);
11393 bnx2x_netif_start(bp);
11398 #define CRC32_RESIDUAL 0xdebb20e3
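/* A standard CRC32 property: running the CRC over a buffer that ends with
 * the (inverted, little-endian) CRC of the preceding bytes yields a fixed
 * residual, 0xdebb20e3, whatever the buffer contents. Each nvram_tbl
 * region below is expected to be stored that way, so bnx2x_test_nvram()
 * just CRCs the whole region and checks for the residual instead of
 * locating and comparing the stored CRC itself.
 */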
11400 static int bnx2x_test_nvram(struct bnx2x *bp)
11402 static const struct {
11406 { 0, 0x14 }, /* bootstrap */
11407 { 0x14, 0xec }, /* dir */
11408 { 0x100, 0x350 }, /* manuf_info */
11409 { 0x450, 0xf0 }, /* feature_info */
11410 { 0x640, 0x64 }, /* upgrade_key_info */
11412 { 0x708, 0x70 }, /* manuf_key_info */
11416 __be32 buf[0x350 / 4];
11417 u8 *data = (u8 *)buf;
11424 rc = bnx2x_nvram_read(bp, 0, data, 4);
11426 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11427 goto test_nvram_exit;
11430 magic = be32_to_cpu(buf[0]);
11431 if (magic != 0x669955aa) {
11432 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11434 goto test_nvram_exit;
11437 for (i = 0; nvram_tbl[i].size; i++) {
11439 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11440 nvram_tbl[i].size);
11442 DP(NETIF_MSG_PROBE,
11443 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11444 goto test_nvram_exit;
11447 crc = ether_crc_le(nvram_tbl[i].size, data);
11448 if (crc != CRC32_RESIDUAL) {
11449 DP(NETIF_MSG_PROBE,
11450 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11452 goto test_nvram_exit;
11460 static int bnx2x_test_intr(struct bnx2x *bp)
11462 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11465 if (!netif_running(bp->dev))
11468 config->hdr.length = 0;
11469 if (CHIP_IS_E1(bp))
11470 /* use last unicast entries */
11471 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11473 config->hdr.offset = BP_FUNC(bp);
11474 config->hdr.client_id = bp->fp->cl_id;
11475 config->hdr.reserved1 = 0;
11477 bp->set_mac_pending++;
11479 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11480 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11481 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11483 for (i = 0; i < 10; i++) {
11484 if (!bp->set_mac_pending)
11487 msleep_interruptible(10);
11496 static void bnx2x_self_test(struct net_device *dev,
11497 struct ethtool_test *etest, u64 *buf)
11499 struct bnx2x *bp = netdev_priv(dev);
11501 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11502 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11503 etest->flags |= ETH_TEST_FL_FAILED;
11507 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11509 if (!netif_running(dev))
11512 /* offline tests are not supported in MF mode */
11514 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11516 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11517 int port = BP_PORT(bp);
11521 /* save current value of input enable for TX port IF */
11522 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11523 /* disable input for TX port IF */
11524 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11526 link_up = (bnx2x_link_test(bp) == 0);
11527 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11528 bnx2x_nic_load(bp, LOAD_DIAG);
11529 /* wait until link state is restored */
11530 bnx2x_wait_for_link(bp, link_up);
11532 if (bnx2x_test_registers(bp) != 0) {
11534 etest->flags |= ETH_TEST_FL_FAILED;
11536 if (bnx2x_test_memory(bp) != 0) {
11538 etest->flags |= ETH_TEST_FL_FAILED;
11540 buf[2] = bnx2x_test_loopback(bp, link_up);
11542 etest->flags |= ETH_TEST_FL_FAILED;
11544 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11546 /* restore input for TX port IF */
11547 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11549 bnx2x_nic_load(bp, LOAD_NORMAL);
11550 /* wait until link state is restored */
11551 bnx2x_wait_for_link(bp, link_up);
11553 if (bnx2x_test_nvram(bp) != 0) {
11555 etest->flags |= ETH_TEST_FL_FAILED;
11557 if (bnx2x_test_intr(bp) != 0) {
11559 etest->flags |= ETH_TEST_FL_FAILED;
11562 if (bnx2x_link_test(bp) != 0) {
11564 etest->flags |= ETH_TEST_FL_FAILED;
11567 #ifdef BNX2X_EXTRA_DEBUG
11568 bnx2x_panic_dump(bp);
11572 static const struct {
11575 u8 string[ETH_GSTRING_LEN];
11576 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11577 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11578 { Q_STATS_OFFSET32(error_bytes_received_hi),
11579 8, "[%d]: rx_error_bytes" },
11580 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11581 8, "[%d]: rx_ucast_packets" },
11582 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11583 8, "[%d]: rx_mcast_packets" },
11584 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11585 8, "[%d]: rx_bcast_packets" },
11586 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11587 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11588 4, "[%d]: rx_phy_ip_err_discards"},
11589 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11590 4, "[%d]: rx_skb_alloc_discard" },
11591 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11593 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11594 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11595 8, "[%d]: tx_ucast_packets" },
11596 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11597 8, "[%d]: tx_mcast_packets" },
11598 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11599 8, "[%d]: tx_bcast_packets" }
11602 static const struct {
11606 #define STATS_FLAGS_PORT 1
11607 #define STATS_FLAGS_FUNC 2
11608 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11609 u8 string[ETH_GSTRING_LEN];
11610 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11611 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11612 8, STATS_FLAGS_BOTH, "rx_bytes" },
11613 { STATS_OFFSET32(error_bytes_received_hi),
11614 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11615 { STATS_OFFSET32(total_unicast_packets_received_hi),
11616 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11617 { STATS_OFFSET32(total_multicast_packets_received_hi),
11618 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11619 { STATS_OFFSET32(total_broadcast_packets_received_hi),
11620 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11621 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11622 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11623 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11624 8, STATS_FLAGS_PORT, "rx_align_errors" },
11625 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11626 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11627 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11628 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11629 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11630 8, STATS_FLAGS_PORT, "rx_fragments" },
11631 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11632 8, STATS_FLAGS_PORT, "rx_jabbers" },
11633 { STATS_OFFSET32(no_buff_discard_hi),
11634 8, STATS_FLAGS_BOTH, "rx_discards" },
11635 { STATS_OFFSET32(mac_filter_discard),
11636 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11637 { STATS_OFFSET32(xxoverflow_discard),
11638 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11639 { STATS_OFFSET32(brb_drop_hi),
11640 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11641 { STATS_OFFSET32(brb_truncate_hi),
11642 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11643 { STATS_OFFSET32(pause_frames_received_hi),
11644 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11645 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11646 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11647 { STATS_OFFSET32(nig_timer_max),
11648 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11649 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11650 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11651 { STATS_OFFSET32(rx_skb_alloc_failed),
11652 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11653 { STATS_OFFSET32(hw_csum_err),
11654 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11656 { STATS_OFFSET32(total_bytes_transmitted_hi),
11657 8, STATS_FLAGS_BOTH, "tx_bytes" },
11658 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11659 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11660 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11661 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11662 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11663 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11664 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11665 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11666 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11667 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11668 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11669 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11670 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11671 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11672 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11673 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11674 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11675 8, STATS_FLAGS_PORT, "tx_deferred" },
11676 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11677 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11678 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11679 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11680 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11681 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11682 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11683 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11684 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11685 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11686 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11687 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11688 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11689 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11690 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11691 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11692 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11693 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11694 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11695 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11696 { STATS_OFFSET32(pause_frames_sent_hi),
11697 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11700 #define IS_PORT_STAT(i) \
11701 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11702 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11703 #define IS_E1HMF_MODE_STAT(bp) \
11704 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
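/* In E1H multi-function mode several PCI functions share one physical
 * port, so per-port MAC statistics are not meaningful for an individual
 * function; IS_E1HMF_MODE_STAT() therefore hides them unless statistics
 * debugging (BNX2X_MSG_STATS) is explicitly enabled.
 */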
11706 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11708 struct bnx2x *bp = netdev_priv(dev);
11711 switch (stringset) {
11713 if (is_multi(bp)) {
11714 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11715 if (!IS_E1HMF_MODE_STAT(bp))
11716 num_stats += BNX2X_NUM_STATS;
11718 if (IS_E1HMF_MODE_STAT(bp)) {
11720 for (i = 0; i < BNX2X_NUM_STATS; i++)
11721 if (IS_FUNC_STAT(i))
11724 num_stats = BNX2X_NUM_STATS;
11729 return BNX2X_NUM_TESTS;
11736 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11738 struct bnx2x *bp = netdev_priv(dev);
11741 switch (stringset) {
11743 if (is_multi(bp)) {
11745 for_each_queue(bp, i) {
11746 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11747 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11748 bnx2x_q_stats_arr[j].string, i);
11749 k += BNX2X_NUM_Q_STATS;
11751 if (IS_E1HMF_MODE_STAT(bp))
11753 for (j = 0; j < BNX2X_NUM_STATS; j++)
11754 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11755 bnx2x_stats_arr[j].string);
11757 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11758 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11760 strcpy(buf + j*ETH_GSTRING_LEN,
11761 bnx2x_stats_arr[i].string);
11768 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
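/* Statistics are kept by the FW/HW as 32-bit words; 64-bit counters are
 * stored as a pair of consecutive words with the *_hi name pointing at
 * the most significant half. HILO_U64() recombines them, roughly
 * ((u64)hi << 32) + lo, which is why the tables above record a size of 8
 * for those entries and 4 for plain 32-bit counters.
 */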
11773 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11774 struct ethtool_stats *stats, u64 *buf)
11776 struct bnx2x *bp = netdev_priv(dev);
11777 u32 *hw_stats, *offset;
11780 if (is_multi(bp)) {
11782 for_each_queue(bp, i) {
11783 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11784 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11785 if (bnx2x_q_stats_arr[j].size == 0) {
11786 /* skip this counter */
11790 offset = (hw_stats +
11791 bnx2x_q_stats_arr[j].offset);
11792 if (bnx2x_q_stats_arr[j].size == 4) {
11793 /* 4-byte counter */
11794 buf[k + j] = (u64) *offset;
11797 /* 8-byte counter */
11798 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11800 k += BNX2X_NUM_Q_STATS;
11802 if (IS_E1HMF_MODE_STAT(bp))
11804 hw_stats = (u32 *)&bp->eth_stats;
11805 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11806 if (bnx2x_stats_arr[j].size == 0) {
11807 /* skip this counter */
11811 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11812 if (bnx2x_stats_arr[j].size == 4) {
11813 /* 4-byte counter */
11814 buf[k + j] = (u64) *offset;
11817 /* 8-byte counter */
11818 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11821 hw_stats = (u32 *)&bp->eth_stats;
11822 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11823 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11825 if (bnx2x_stats_arr[i].size == 0) {
11826 /* skip this counter */
11831 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11832 if (bnx2x_stats_arr[i].size == 4) {
11833 /* 4-byte counter */
11834 buf[j] = (u64) *offset;
11838 /* 8-byte counter */
11839 buf[j] = HILO_U64(*offset, *(offset + 1));
11845 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11847 struct bnx2x *bp = netdev_priv(dev);
11850 if (!netif_running(dev))
11859 for (i = 0; i < (data * 2); i++) {
11861 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11864 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11866 msleep_interruptible(500);
11867 if (signal_pending(current))
11871 if (bp->link_vars.link_up)
11872 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11873 bp->link_vars.line_speed);
11878 static const struct ethtool_ops bnx2x_ethtool_ops = {
11879 .get_settings = bnx2x_get_settings,
11880 .set_settings = bnx2x_set_settings,
11881 .get_drvinfo = bnx2x_get_drvinfo,
11882 .get_regs_len = bnx2x_get_regs_len,
11883 .get_regs = bnx2x_get_regs,
11884 .get_wol = bnx2x_get_wol,
11885 .set_wol = bnx2x_set_wol,
11886 .get_msglevel = bnx2x_get_msglevel,
11887 .set_msglevel = bnx2x_set_msglevel,
11888 .nway_reset = bnx2x_nway_reset,
11889 .get_link = bnx2x_get_link,
11890 .get_eeprom_len = bnx2x_get_eeprom_len,
11891 .get_eeprom = bnx2x_get_eeprom,
11892 .set_eeprom = bnx2x_set_eeprom,
11893 .get_coalesce = bnx2x_get_coalesce,
11894 .set_coalesce = bnx2x_set_coalesce,
11895 .get_ringparam = bnx2x_get_ringparam,
11896 .set_ringparam = bnx2x_set_ringparam,
11897 .get_pauseparam = bnx2x_get_pauseparam,
11898 .set_pauseparam = bnx2x_set_pauseparam,
11899 .get_rx_csum = bnx2x_get_rx_csum,
11900 .set_rx_csum = bnx2x_set_rx_csum,
11901 .get_tx_csum = ethtool_op_get_tx_csum,
11902 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11903 .set_flags = bnx2x_set_flags,
11904 .get_flags = ethtool_op_get_flags,
11905 .get_sg = ethtool_op_get_sg,
11906 .set_sg = ethtool_op_set_sg,
11907 .get_tso = ethtool_op_get_tso,
11908 .set_tso = bnx2x_set_tso,
11909 .self_test = bnx2x_self_test,
11910 .get_sset_count = bnx2x_get_sset_count,
11911 .get_strings = bnx2x_get_strings,
11912 .phys_id = bnx2x_phys_id,
11913 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11916 /* end of ethtool_ops */
11918 /****************************************************************************
11919 * General service functions
11920 ****************************************************************************/
11922 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11926 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11930 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11931 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11932 PCI_PM_CTRL_PME_STATUS));
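/* PCI_PM_CTRL_PME_STATUS is a write-one-to-clear bit, so the write above
 * both selects D0 in the state field and clears any PME event left
 * pending from wake-up.
 */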
11934 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11935 /* delay required during transition out of D3hot */
11940 /* If there are other clients above, don't
11941 shut down the power */
11942 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11944 /* Don't shut down the power for emulation and FPGA */
11945 if (CHIP_REV_IS_SLOW(bp))
11948 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11952 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11954 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11957 /* No more memory access after this point until
11958 * the device is brought back to D0.
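/* The status block reports the RX completion index as seen by the chip.
 * The last entry of each RCQ page is a "next page" pointer rather than a
 * real completion, so an index that lands on a page boundary is stepped
 * past it before being compared with our software rx_comp_cons.
 */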
11968 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11972 /* Tell compiler that status block fields can change */
11974 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11975 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11977 return (fp->rx_comp_cons != rx_cons_sb);
11981 * net_device service functions
11984 static int bnx2x_poll(struct napi_struct *napi, int budget)
11987 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11989 struct bnx2x *bp = fp->bp;
11992 #ifdef BNX2X_STOP_ON_ERROR
11993 if (unlikely(bp->panic)) {
11994 napi_complete(napi);
11999 if (bnx2x_has_tx_work(fp))
12002 if (bnx2x_has_rx_work(fp)) {
12003 work_done += bnx2x_rx_int(fp, budget - work_done);
12005 /* must not complete if we consumed full budget */
12006 if (work_done >= budget)
12010 /* Fall out from the NAPI loop if needed */
12011 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12012 bnx2x_update_fpsb_idx(fp);
12013 /* bnx2x_has_rx_work() reads the status block, thus we need
12014 * to ensure that status block indices have been actually read
12015 * (bnx2x_update_fpsb_idx) prior to this check
12016 * (bnx2x_has_rx_work) so that we won't write the "newer"
12017 * value of the status block to IGU (if there was a DMA right
12018 * after bnx2x_has_rx_work and if there is no rmb, the memory
12019 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12020 * before bnx2x_ack_sb). In this case there will never be
12021 * another interrupt until there is another update of the
12022 * status block, while there is still unhandled work.
12026 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12027 napi_complete(napi);
12028 /* Re-enable interrupts */
12029 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12030 le16_to_cpu(fp->fp_c_idx),
12032 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12033 le16_to_cpu(fp->fp_u_idx),
12034 IGU_INT_ENABLE, 1);
12044 /* We split the first BD into a headers BD and a data BD
12045 * to ease the pain of our fellow microcode engineers;
12046 * we use one DMA mapping for both BDs.
12047 * So far this has only been observed to happen
12048 * in Other Operating Systems(TM)
12050 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12051 struct bnx2x_fastpath *fp,
12052 struct sw_tx_bd *tx_buf,
12053 struct eth_tx_start_bd **tx_bd, u16 hlen,
12054 u16 bd_prod, int nbd)
12056 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12057 struct eth_tx_bd *d_tx_bd;
12058 dma_addr_t mapping;
12059 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12061 /* first fix first BD */
12062 h_tx_bd->nbd = cpu_to_le16(nbd);
12063 h_tx_bd->nbytes = cpu_to_le16(hlen);
12065 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12066 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12067 h_tx_bd->addr_lo, h_tx_bd->nbd);
12069 /* now get a new data BD
12070 * (after the pbd) and fill it */
12071 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12072 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12074 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12075 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12077 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12078 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12079 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12081 /* this marks the BD as one that has no individual mapping */
12082 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12084 DP(NETIF_MSG_TX_QUEUED,
12085 "TSO split data size is %d (%x:%x)\n",
12086 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12089 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
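/* One's-complement checksum arithmetic makes this fixup cheap: when the
 * checksum supplied by the stack covers 'fix' bytes more (fix > 0) or
 * fewer (fix < 0) than the range the HW parser will look at, the partial
 * sum of just those bytes can be subtracted from (or added to) the value
 * and the result refolded, instead of recomputing the checksum from
 * scratch. The final swab16() yields the byte order the parsing BD
 * expects.
 */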
12094 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12097 csum = (u16) ~csum_fold(csum_sub(csum,
12098 csum_partial(t_header - fix, fix, 0)));
12101 csum = (u16) ~csum_fold(csum_add(csum,
12102 csum_partial(t_header, -fix, 0)));
12104 return swab16(csum);
12107 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12111 if (skb->ip_summed != CHECKSUM_PARTIAL)
12115 if (skb->protocol == htons(ETH_P_IPV6)) {
12117 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12118 rc |= XMIT_CSUM_TCP;
12122 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12123 rc |= XMIT_CSUM_TCP;
12127 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12128 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12130 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12131 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12136 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12137 /* check if the packet requires linearization (the packet is too
12138 fragmented); no need to check fragmentation if the page size > 8K
12139 (there will be no violation of the FW restrictions) */
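/* Worked example, assuming MAX_FETCH_BD is 13 (so wnd_size is 10): for an
 * LSO skb with 12 frags the code below slides a window of 10 consecutive
 * BDs across the linear part and the frag list; if the bytes covered by
 * any such window sum to less than one MSS, the FW could fetch a full BD
 * window that does not contain a complete segment, so the skb must be
 * linearized first.
 */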
12140 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12145 int first_bd_sz = 0;
12147 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12148 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12150 if (xmit_type & XMIT_GSO) {
12151 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12152 /* Check if LSO packet needs to be copied:
12153 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12154 int wnd_size = MAX_FETCH_BD - 3;
12155 /* Number of windows to check */
12156 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12161 /* Headers length */
12162 hlen = (int)(skb_transport_header(skb) - skb->data) +
12165 /* Amount of data (w/o headers) on the linear part of the SKB */
12166 first_bd_sz = skb_headlen(skb) - hlen;
12168 wnd_sum = first_bd_sz;
12170 /* Calculate the first sum - it's special */
12171 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12173 skb_shinfo(skb)->frags[frag_idx].size;
12175 /* If there was data on the linear part of the skb - check it */
12176 if (first_bd_sz > 0) {
12177 if (unlikely(wnd_sum < lso_mss)) {
12182 wnd_sum -= first_bd_sz;
12185 /* Others are easier: run through the frag list and
12186 check all windows */
12187 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12189 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12191 if (unlikely(wnd_sum < lso_mss)) {
12196 skb_shinfo(skb)->frags[wnd_idx].size;
12199 /* in the non-LSO case a too fragmented packet should always be linearized */
12206 if (unlikely(to_copy))
12207 DP(NETIF_MSG_TX_QUEUED,
12208 "Linearization IS REQUIRED for %s packet. "
12209 "num_frags %d hlen %d first_bd_sz %d\n",
12210 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12211 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12217 /* called with netif_tx_lock
12218 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12219 * netif_wake_queue()
12221 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12223 struct bnx2x *bp = netdev_priv(dev);
12224 struct bnx2x_fastpath *fp;
12225 struct netdev_queue *txq;
12226 struct sw_tx_bd *tx_buf;
12227 struct eth_tx_start_bd *tx_start_bd;
12228 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12229 struct eth_tx_parse_bd *pbd = NULL;
12230 u16 pkt_prod, bd_prod;
12232 dma_addr_t mapping;
12233 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12236 __le16 pkt_size = 0;
12237 struct ethhdr *eth;
12238 u8 mac_type = UNICAST_ADDRESS;
12240 #ifdef BNX2X_STOP_ON_ERROR
12241 if (unlikely(bp->panic))
12242 return NETDEV_TX_BUSY;
12245 fp_index = skb_get_queue_mapping(skb);
12246 txq = netdev_get_tx_queue(dev, fp_index);
12248 fp = &bp->fp[fp_index];
12250 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12251 fp->eth_q_stats.driver_xoff++;
12252 netif_tx_stop_queue(txq);
12253 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12254 return NETDEV_TX_BUSY;
12257 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12258 " gso type %x xmit_type %x\n",
12259 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12260 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12262 eth = (struct ethhdr *)skb->data;
12264 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12265 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12266 if (is_broadcast_ether_addr(eth->h_dest))
12267 mac_type = BROADCAST_ADDRESS;
12269 mac_type = MULTICAST_ADDRESS;
12272 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12273 /* First, check if we need to linearize the skb (due to FW
12274 restrictions). No need to check fragmentation if page size > 8K
12275 (there will be no violation of the FW restrictions) */
12276 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12277 /* Statistics of linearization */
12279 if (skb_linearize(skb) != 0) {
12280 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12281 "silently dropping this SKB\n");
12282 dev_kfree_skb_any(skb);
12283 return NETDEV_TX_OK;
12289 Please read carefully. First we use one BD which we mark as start,
12290 then we have a parsing info BD (used for TSO or xsum),
12291 and only then we have the rest of the TSO BDs.
12292 (don't forget to mark the last one as last,
12293 and to unmap only AFTER you write to the BD ...)
12294 And above all, all pbd sizes are in words - NOT DWORDS!
12297 pkt_prod = fp->tx_pkt_prod++;
12298 bd_prod = TX_BD(fp->tx_bd_prod);
12300 /* get a tx_buf and first BD */
12301 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12302 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12304 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12305 tx_start_bd->general_data = (mac_type <<
12306 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12308 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12310 /* remember the first BD of the packet */
12311 tx_buf->first_bd = fp->tx_bd_prod;
12315 DP(NETIF_MSG_TX_QUEUED,
12316 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12317 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12320 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12321 (bp->flags & HW_VLAN_TX_FLAG)) {
12322 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12323 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12326 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12328 /* turn on parsing and get a BD */
12329 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12330 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12332 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12334 if (xmit_type & XMIT_CSUM) {
12335 hlen = (skb_network_header(skb) - skb->data) / 2;
12337 /* for now NS flag is not used in Linux */
12339 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12340 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12342 pbd->ip_hlen = (skb_transport_header(skb) -
12343 skb_network_header(skb)) / 2;
12345 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12347 pbd->total_hlen = cpu_to_le16(hlen);
12350 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12352 if (xmit_type & XMIT_CSUM_V4)
12353 tx_start_bd->bd_flags.as_bitfield |=
12354 ETH_TX_BD_FLAGS_IP_CSUM;
12356 tx_start_bd->bd_flags.as_bitfield |=
12357 ETH_TX_BD_FLAGS_IPV6;
12359 if (xmit_type & XMIT_CSUM_TCP) {
12360 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12363 s8 fix = SKB_CS_OFF(skb); /* signed! */
12365 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12367 DP(NETIF_MSG_TX_QUEUED,
12368 "hlen %d fix %d csum before fix %x\n",
12369 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12371 /* HW bug: fixup the CSUM */
12372 pbd->tcp_pseudo_csum =
12373 bnx2x_csum_fix(skb_transport_header(skb),
12376 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12377 pbd->tcp_pseudo_csum);
12381 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12382 skb_headlen(skb), DMA_TO_DEVICE);
12384 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12385 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12386 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12387 tx_start_bd->nbd = cpu_to_le16(nbd);
12388 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12389 pkt_size = tx_start_bd->nbytes;
12391 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12392 " nbytes %d flags %x vlan %x\n",
12393 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12394 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12395 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12397 if (xmit_type & XMIT_GSO) {
12399 DP(NETIF_MSG_TX_QUEUED,
12400 "TSO packet len %d hlen %d total len %d tso size %d\n",
12401 skb->len, hlen, skb_headlen(skb),
12402 skb_shinfo(skb)->gso_size);
12404 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12406 if (unlikely(skb_headlen(skb) > hlen))
12407 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12408 hlen, bd_prod, ++nbd);
12410 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12411 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12412 pbd->tcp_flags = pbd_tcp_flags(skb);
12414 if (xmit_type & XMIT_GSO_V4) {
12415 pbd->ip_id = swab16(ip_hdr(skb)->id);
12416 pbd->tcp_pseudo_csum =
12417 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12418 ip_hdr(skb)->daddr,
12419 0, IPPROTO_TCP, 0));
12422 pbd->tcp_pseudo_csum =
12423 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12424 &ipv6_hdr(skb)->daddr,
12425 0, IPPROTO_TCP, 0));
12427 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
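/* The pseudo-header checksums above are computed with a zero length
 * field; ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN tells the FW this, so it
 * can patch in the real length for every segment it carves out of the
 * LSO packet.
 */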
12429 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12431 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12432 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12434 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12435 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12436 if (total_pkt_bd == NULL)
12437 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12439 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12441 frag->size, DMA_TO_DEVICE);
12443 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12444 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12445 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12446 le16_add_cpu(&pkt_size, frag->size);
12448 DP(NETIF_MSG_TX_QUEUED,
12449 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12450 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12451 le16_to_cpu(tx_data_bd->nbytes));
12454 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12456 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12458 /* now send a tx doorbell, counting the next BD
12459 * if the packet contains or ends with it
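* The last BD of each page in the TX ring is a "next page" pointer that
* the chip fetches but that carries no data; if the producer's offset
* within the page is smaller than the BD count, the packet wrapped across
* a page boundary and that pointer BD must be counted in nbd so the FW
* fetches far enough.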
12461 if (TX_BD_POFF(bd_prod) < nbd)
12464 if (total_pkt_bd != NULL)
12465 total_pkt_bd->total_pkt_bytes = pkt_size;
12468 DP(NETIF_MSG_TX_QUEUED,
12469 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12470 " tcp_flags %x xsum %x seq %u hlen %u\n",
12471 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12472 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12473 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12475 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12478 * Make sure that the BD data is updated before updating the producer
12479 * since FW might read the BD right after the producer is updated.
12480 * This is only applicable for weak-ordered memory model archs such
12481 * as IA-64. The following barrier is also mandatory since the FW
12482 * assumes packets must have BDs.
12486 fp->tx_db.data.prod += nbd;
12488 DOORBELL(bp, fp->index, fp->tx_db.raw);
12492 fp->tx_bd_prod += nbd;
12494 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12495 netif_tx_stop_queue(txq);
12497 /* The paired memory barrier is in bnx2x_tx_int(); we have to keep
12498 * the ordering of the set_bit() in netif_tx_stop_queue() and the read
12499 * of fp->tx_bd_cons */
12502 fp->eth_q_stats.driver_xoff++;
12503 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12504 netif_tx_wake_queue(txq);
12508 return NETDEV_TX_OK;
12511 /* called with rtnl_lock */
12512 static int bnx2x_open(struct net_device *dev)
12514 struct bnx2x *bp = netdev_priv(dev);
12516 netif_carrier_off(dev);
12518 bnx2x_set_power_state(bp, PCI_D0);
12520 if (!bnx2x_reset_is_done(bp)) {
12522 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
12527 /* If it's the first function to load and "reset done"
12528 * is still not cleared, it may mean that a previous recovery
12529 * never completed. We don't check the attention state here
12530 * because it may have already been cleared by a "common"
12531 * reset, but we shall proceed with "process kill" anyway.
12533 if ((bnx2x_get_load_cnt(bp) == 0) &&
12534 bnx2x_trylock_hw_lock(bp,
12535 HW_LOCK_RESOURCE_RESERVED_08) &&
12536 (!bnx2x_leader_reset(bp))) {
12537 DP(NETIF_MSG_HW, "Recovered in open\n");
12541 bnx2x_set_power_state(bp, PCI_D3hot);
12543 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12544 " completed yet. Try again later. If you still see this"
12545 " message after a few retries then a power cycle is"
12546 " required.\n", bp->dev->name);
12552 bp->recovery_state = BNX2X_RECOVERY_DONE;
12554 return bnx2x_nic_load(bp, LOAD_OPEN);
12557 /* called with rtnl_lock */
12558 static int bnx2x_close(struct net_device *dev)
12560 struct bnx2x *bp = netdev_priv(dev);
12562 /* Unload the driver, release IRQs */
12563 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12564 bnx2x_set_power_state(bp, PCI_D3hot);
12569 /* called with netif_tx_lock from dev_mcast.c */
12570 static void bnx2x_set_rx_mode(struct net_device *dev)
12572 struct bnx2x *bp = netdev_priv(dev);
12573 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12574 int port = BP_PORT(bp);
12576 if (bp->state != BNX2X_STATE_OPEN) {
12577 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12581 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12583 if (dev->flags & IFF_PROMISC)
12584 rx_mode = BNX2X_RX_MODE_PROMISC;
12586 else if ((dev->flags & IFF_ALLMULTI) ||
12587 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12589 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12591 else { /* some multicasts */
12592 if (CHIP_IS_E1(bp)) {
12593 int i, old, offset;
12594 struct netdev_hw_addr *ha;
12595 struct mac_configuration_cmd *config =
12596 bnx2x_sp(bp, mcast_config);
12599 netdev_for_each_mc_addr(ha, dev) {
12600 config->config_table[i].
12601 cam_entry.msb_mac_addr =
12602 swab16(*(u16 *)&ha->addr[0]);
12603 config->config_table[i].
12604 cam_entry.middle_mac_addr =
12605 swab16(*(u16 *)&ha->addr[2]);
12606 config->config_table[i].
12607 cam_entry.lsb_mac_addr =
12608 swab16(*(u16 *)&ha->addr[4]);
12609 config->config_table[i].cam_entry.flags =
12611 config->config_table[i].
12612 target_table_entry.flags = 0;
12613 config->config_table[i].target_table_entry.
12614 clients_bit_vector =
12615 cpu_to_le32(1 << BP_L_ID(bp));
12616 config->config_table[i].
12617 target_table_entry.vlan_id = 0;
12620 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12621 config->config_table[i].
12622 cam_entry.msb_mac_addr,
12623 config->config_table[i].
12624 cam_entry.middle_mac_addr,
12625 config->config_table[i].
12626 cam_entry.lsb_mac_addr);
12629 old = config->hdr.length;
12631 for (; i < old; i++) {
12632 if (CAM_IS_INVALID(config->
12633 config_table[i])) {
12634 /* already invalidated */
12638 CAM_INVALIDATE(config->
12643 if (CHIP_REV_IS_SLOW(bp))
12644 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12646 offset = BNX2X_MAX_MULTICAST*(1 + port);
12648 config->hdr.length = i;
12649 config->hdr.offset = offset;
12650 config->hdr.client_id = bp->fp->cl_id;
12651 config->hdr.reserved1 = 0;
12653 bp->set_mac_pending++;
12656 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12657 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12658 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12661 /* Accept one or more multicasts */
12662 struct netdev_hw_addr *ha;
12663 u32 mc_filter[MC_HASH_SIZE];
12664 u32 crc, bit, regidx;
12667 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12669 netdev_for_each_mc_addr(ha, dev) {
12670 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12673 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12674 bit = (crc >> 24) & 0xff;
12677 mc_filter[regidx] |= (1 << bit);
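/* MC_HASH_SIZE 32-bit registers form a 256-bit hash filter (assuming
 * MC_HASH_SIZE is 8): the top byte of the crc32c of each multicast MAC
 * selects one bit in that table. The filter is imperfect - a set bit
 * only means "some subscribed address hashes here" - so extra multicast
 * frames may still be delivered and must be dropped higher up the stack.
 */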
12680 for (i = 0; i < MC_HASH_SIZE; i++)
12681 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12686 bp->rx_mode = rx_mode;
12687 bnx2x_set_storm_rx_mode(bp);
12690 /* called with rtnl_lock */
12691 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12693 struct sockaddr *addr = p;
12694 struct bnx2x *bp = netdev_priv(dev);
12696 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12699 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12700 if (netif_running(dev)) {
12701 if (CHIP_IS_E1(bp))
12702 bnx2x_set_eth_mac_addr_e1(bp, 1);
12704 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12710 /* called with rtnl_lock */
12711 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12712 int devad, u16 addr)
12714 struct bnx2x *bp = netdev_priv(netdev);
12717 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12719 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12720 prtad, devad, addr);
12722 if (prtad != bp->mdio.prtad) {
12723 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12724 prtad, bp->mdio.prtad);
12728 /* The HW expects different devad if CL22 is used */
12729 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12731 bnx2x_acquire_phy_lock(bp);
12732 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12733 devad, addr, &value);
12734 bnx2x_release_phy_lock(bp);
12735 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12742 /* called with rtnl_lock */
12743 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12744 u16 addr, u16 value)
12746 struct bnx2x *bp = netdev_priv(netdev);
12747 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12750 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12751 " value 0x%x\n", prtad, devad, addr, value);
12753 if (prtad != bp->mdio.prtad) {
12754 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12755 prtad, bp->mdio.prtad);
12759 /* The HW expects different devad if CL22 is used */
12760 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12762 bnx2x_acquire_phy_lock(bp);
12763 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12764 devad, addr, value);
12765 bnx2x_release_phy_lock(bp);
12769 /* called with rtnl_lock */
12770 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12772 struct bnx2x *bp = netdev_priv(dev);
12773 struct mii_ioctl_data *mdio = if_mii(ifr);
12775 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12776 mdio->phy_id, mdio->reg_num, mdio->val_in);
12778 if (!netif_running(dev))
12781 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12784 /* called with rtnl_lock */
12785 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12787 struct bnx2x *bp = netdev_priv(dev);
12790 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12791 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12795 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12796 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12799 /* This does not race with packet allocation
12800 * because the actual alloc size is
12801 * only updated as part of load
12803 dev->mtu = new_mtu;
12805 if (netif_running(dev)) {
12806 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12807 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12813 static void bnx2x_tx_timeout(struct net_device *dev)
12815 struct bnx2x *bp = netdev_priv(dev);
12817 #ifdef BNX2X_STOP_ON_ERROR
12821 /* This allows the netif to be shut down gracefully before resetting */
12822 schedule_delayed_work(&bp->reset_task, 0);
12826 /* called with rtnl_lock */
12827 static void bnx2x_vlan_rx_register(struct net_device *dev,
12828 struct vlan_group *vlgrp)
12830 struct bnx2x *bp = netdev_priv(dev);
12834 /* Set flags according to the required capabilities */
12835 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12837 if (dev->features & NETIF_F_HW_VLAN_TX)
12838 bp->flags |= HW_VLAN_TX_FLAG;
12840 if (dev->features & NETIF_F_HW_VLAN_RX)
12841 bp->flags |= HW_VLAN_RX_FLAG;
12843 if (netif_running(dev))
12844 bnx2x_set_client_config(bp);
12849 #ifdef CONFIG_NET_POLL_CONTROLLER
12850 static void poll_bnx2x(struct net_device *dev)
12852 struct bnx2x *bp = netdev_priv(dev);
12854 disable_irq(bp->pdev->irq);
12855 bnx2x_interrupt(bp->pdev->irq, dev);
12856 enable_irq(bp->pdev->irq);
12860 static const struct net_device_ops bnx2x_netdev_ops = {
12861 .ndo_open = bnx2x_open,
12862 .ndo_stop = bnx2x_close,
12863 .ndo_start_xmit = bnx2x_start_xmit,
12864 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12865 .ndo_set_mac_address = bnx2x_change_mac_addr,
12866 .ndo_validate_addr = eth_validate_addr,
12867 .ndo_do_ioctl = bnx2x_ioctl,
12868 .ndo_change_mtu = bnx2x_change_mtu,
12869 .ndo_tx_timeout = bnx2x_tx_timeout,
12871 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12873 #ifdef CONFIG_NET_POLL_CONTROLLER
12874 .ndo_poll_controller = poll_bnx2x,
12878 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12879 struct net_device *dev)
12884 SET_NETDEV_DEV(dev, &pdev->dev);
12885 bp = netdev_priv(dev);
12890 bp->func = PCI_FUNC(pdev->devfn);
12892 rc = pci_enable_device(pdev);
12894 dev_err(&bp->pdev->dev,
12895 "Cannot enable PCI device, aborting\n");
12899 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12900 dev_err(&bp->pdev->dev,
12901 "Cannot find PCI device base address, aborting\n");
12903 goto err_out_disable;
12906 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12907 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12908 " base address, aborting\n");
12910 goto err_out_disable;
12913 if (atomic_read(&pdev->enable_cnt) == 1) {
12914 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12916 dev_err(&bp->pdev->dev,
12917 "Cannot obtain PCI resources, aborting\n");
12918 goto err_out_disable;
12921 pci_set_master(pdev);
12922 pci_save_state(pdev);
12925 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12926 if (bp->pm_cap == 0) {
12927 dev_err(&bp->pdev->dev,
12928 "Cannot find power management capability, aborting\n");
12930 goto err_out_release;
12933 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12934 if (bp->pcie_cap == 0) {
12935 dev_err(&bp->pdev->dev,
12936 "Cannot find PCI Express capability, aborting\n");
12938 goto err_out_release;
12941 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12942 bp->flags |= USING_DAC_FLAG;
12943 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12944 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12945 " failed, aborting\n");
12947 goto err_out_release;
12950 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12951 dev_err(&bp->pdev->dev,
12952 "System does not support DMA, aborting\n");
12954 goto err_out_release;
12957 dev->mem_start = pci_resource_start(pdev, 0);
12958 dev->base_addr = dev->mem_start;
12959 dev->mem_end = pci_resource_end(pdev, 0);
12961 dev->irq = pdev->irq;
12963 bp->regview = pci_ioremap_bar(pdev, 0);
12964 if (!bp->regview) {
12965 dev_err(&bp->pdev->dev,
12966 "Cannot map register space, aborting\n");
12968 goto err_out_release;
12971 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12972 min_t(u64, BNX2X_DB_SIZE,
12973 pci_resource_len(pdev, 2)));
12974 if (!bp->doorbells) {
12975 dev_err(&bp->pdev->dev,
12976 "Cannot map doorbell space, aborting\n");
12978 goto err_out_unmap;
12981 bnx2x_set_power_state(bp, PCI_D0);
12983 /* clean indirect addresses */
12984 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12985 PCICFG_VENDOR_ID_OFFSET);
12986 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12987 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12988 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12989 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12991 /* Reset the load counter */
12992 bnx2x_clear_load_cnt(bp);
12994 dev->watchdog_timeo = TX_TIMEOUT;
12996 dev->netdev_ops = &bnx2x_netdev_ops;
12997 dev->ethtool_ops = &bnx2x_ethtool_ops;
12998 dev->features |= NETIF_F_SG;
12999 dev->features |= NETIF_F_HW_CSUM;
13000 if (bp->flags & USING_DAC_FLAG)
13001 dev->features |= NETIF_F_HIGHDMA;
13002 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13003 dev->features |= NETIF_F_TSO6;
13005 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13006 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13008 dev->vlan_features |= NETIF_F_SG;
13009 dev->vlan_features |= NETIF_F_HW_CSUM;
13010 if (bp->flags & USING_DAC_FLAG)
13011 dev->vlan_features |= NETIF_F_HIGHDMA;
13012 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13013 dev->vlan_features |= NETIF_F_TSO6;
13016 /* get_port_hwinfo() will set prtad and mmds properly */
13017 bp->mdio.prtad = MDIO_PRTAD_NONE;
13019 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13020 bp->mdio.dev = dev;
13021 bp->mdio.mdio_read = bnx2x_mdio_read;
13022 bp->mdio.mdio_write = bnx2x_mdio_write;
13028 iounmap(bp->regview);
13029 bp->regview = NULL;
13031 if (bp->doorbells) {
13032 iounmap(bp->doorbells);
13033 bp->doorbells = NULL;
13037 if (atomic_read(&pdev->enable_cnt) == 1)
13038 pci_release_regions(pdev);
13041 pci_disable_device(pdev);
13042 pci_set_drvdata(pdev, NULL);
13048 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13049 int *width, int *speed)
13051 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13053 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13055 /* return value of 1=2.5GHz 2=5GHz */
13056 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13059 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13061 const struct firmware *firmware = bp->firmware;
13062 struct bnx2x_fw_file_hdr *fw_hdr;
13063 struct bnx2x_fw_file_section *sections;
13064 u32 offset, len, num_ops;
13069 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13072 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13073 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13075 /* Make sure none of the offsets and sizes make us read beyond
13076 * the end of the firmware data */
13077 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13078 offset = be32_to_cpu(sections[i].offset);
13079 len = be32_to_cpu(sections[i].len);
13080 if (offset + len > firmware->size) {
13081 dev_err(&bp->pdev->dev,
13082 "Section %d length is out of bounds\n", i);
13087 /* Likewise for the init_ops offsets */
13088 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13089 ops_offsets = (u16 *)(firmware->data + offset);
13090 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13092 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13093 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13094 dev_err(&bp->pdev->dev,
13095 "Section offset %d is out of bounds\n", i);
13100 /* Check FW version */
13101 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13102 fw_ver = firmware->data + offset;
13103 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13104 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13105 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13106 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13107 dev_err(&bp->pdev->dev,
13108 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13109 fw_ver[0], fw_ver[1], fw_ver[2],
13110 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13111 BCM_5710_FW_MINOR_VERSION,
13112 BCM_5710_FW_REVISION_VERSION,
13113 BCM_5710_FW_ENGINEERING_VERSION);
13120 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13122 const __be32 *source = (const __be32 *)_source;
13123 u32 *target = (u32 *)_target;
13126 for (i = 0; i < n/4; i++)
13127 target[i] = be32_to_cpu(source[i]);
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
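/*
 * Example (a sketch): one 8-byte big-endian record 05 00 01 00 00 00 00 10
 * unpacks to target[i].op == 0x05, target[i].offset == 0x000100 and
 * target[i].raw_data == 0x00000010, i.e. the first word carries the
 * opcode in its top byte and the 24-bit offset below it, as described
 * in the format comment above.
 */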
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
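/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands (roughly; an illustrative sketch) to:
 *
 *	do {
 *		u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *		bp->init_data = kmalloc(len, GFP_KERNEL);
 *		if (!bp->init_data) {
 *			pr_err("Failed to allocate %d bytes for init_data\n",
 *			       len);
 *			goto request_firmware_exit;
 *		}
 *		be32_to_cpu_n(bp->firmware->data +
 *			      be32_to_cpu(fw_hdr->init_data.offset),
 *			      (u8 *)bp->init_data, len);
 *	} while (0);
 */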
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
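/*
 * Editor's note: request_firmware() resolves FW_FILE_NAME_E1/E1H through
 * the kernel firmware loader, so the matching bnx2x-e1*.fw images must be
 * present in the firmware search path (typically /lib/firmware) for probe
 * to succeed; on failure the labels above unwind in reverse allocation
 * order.
 */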
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (dev == NULL) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
					bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
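/*
 * Ring arithmetic sketch: cnic_kwq is a page-sized circular buffer of
 * eth_spe entries, with cnic_kwq_last pointing at its final slot, so the
 * producer and consumer above both advance with the same wrap pattern:
 *
 *	ptr = (ptr == bp->cnic_kwq_last) ? bp->cnic_kwq : ptr + 1;
 */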
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;
		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
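/*
 * Editor's note on the two senders above: bnx2x_cnic_ctl_send() takes
 * cnic_mutex and may sleep, while bnx2x_cnic_ctl_send_bh() uses
 * rcu_read_lock()/rcu_dereference() so it can safely run from bottom-half
 * context, where blocking on a mutex is not allowed.
 */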
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
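/*
 * Usage sketch (hypothetical caller): the CNIC module reaches this
 * handler through the drv_ctl hook exported via bnx2x_cnic_probe(), e.g.
 *
 *	struct drv_ctl_info info = { .cmd = DRV_CTL_COMPLETION_CMD };
 *
 *	info.data.comp.comp_count = 1;
 *	cp->drv_ctl(netdev, &info);
 *
 * which credits one completion back through bnx2x_cnic_sp_post(); netdev
 * here is a placeholder name, not from this driver.
 */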
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
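/*
 * Teardown ordering note: bp->cnic_ops is cleared under cnic_mutex, and
 * synchronize_rcu() then waits for any bnx2x_cnic_ctl_send_bh() still
 * inside its RCU read-side section to finish dereferencing the old ops
 * before the KWQ ring is freed.
 */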
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
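/*
 * Consumer sketch (hypothetical, modelled on the in-tree cnic module,
 * the intended caller of this export):
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, &my_cnic_ops, my_data)) {
 *		... the L5 offload path is now live; it is later torn
 *		down with cp->drv_unregister_cnic(netdev);
 *	}
 *
 * Here netdev, my_cnic_ops and my_data are placeholders, not names from
 * this driver.
 */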
#endif /* BCM_CNIC */