/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/stringify.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
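/* The helpers below access chip registers indirectly through the GRC
 * address/data window in PCI config space: the target GRC address is
 * programmed into PCICFG_GRC_ADDRESS, the value moves through
 * PCICFG_GRC_DATA, and the window is then restored to
 * PCICFG_VENDOR_ID_OFFSET so that normal config-space accesses keep working.
 */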
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
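/* bnx2x_write_dmae() copies len32 dwords from host memory (dma_addr) into
 * chip internal memory (dst_addr) using the DMAE block.  Completion is
 * detected by polling a write-back dword (wb_comp) in slowpath memory that
 * the DMAE engine sets to DMAE_COMP_VAL when the copy is done.  Before the
 * DMAE block is initialized (!bp->dmae_ready) the copy falls back to slow
 * indirect register writes.
 */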
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
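/* bnx2x_read_dmae() is the mirror of bnx2x_write_dmae(): it copies len32
 * dwords from chip internal memory (src_addr) into the slowpath wb_data
 * buffer in host memory, again polling wb_comp for completion and falling
 * back to indirect register reads while DMAE is not ready.
 */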
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
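/* The write-back ("wb") registers are 64 bits wide and have to be moved as
 * an atomic quantity, which a plain 32-bit REG_RD/REG_WR cannot provide;
 * the two helpers below therefore go through DMAE with a two-dword buffer.
 */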
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
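/* Each STORM processor (X, T, C and U) keeps a list of firmware asserts in
 * its internal memory together with a "last index" byte.  bnx2x_mc_assert()
 * walks the four lists and prints every valid entry (an entry whose first
 * dword is not COMMON_ASM_INVALID_ASSERT_OPCODE), returning the number of
 * asserts found.
 */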
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
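/* The MCP keeps a NUL-terminated log of its printf-style output in
 * scratchpad memory together with a "mark" pointer.  bnx2x_fw_dump() reads
 * the buffer in two chunks (from the mark to the end of the buffer, then
 * from the start of the buffer up to the mark) and echoes it to the kernel
 * log.
 */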
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
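/* bnx2x_panic_dump() freezes statistics and dumps the driver state needed
 * for post-mortem analysis: the default status block indices, the per-queue
 * Rx/Tx indices, and the ring contents around the current consumer
 * positions.
 */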
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
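/* bnx2x_int_enable() programs the HC (host coalescing) block according to
 * the interrupt mode in use - MSI-X (one ISR per vector), MSI (single ISR)
 * or legacy INTx - and then opens the leading/trailing edge attention masks
 * on E1H chips.
 */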
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */
/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
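/* bnx2x_ack_sb() acknowledges a status block to the IGU by writing a single
 * dword that encodes the status block id, the storm, the new index value
 * and whether the interrupt should be re-enabled or left disabled.
 */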
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
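/* A transmitted packet occupies a chain of BDs: a start BD, a parse BD,
 * an optional TSO split header BD and one BD per fragment.  Only the start
 * BD and the fragment BDs carry DMA mappings; bnx2x_free_tx_pkt() walks the
 * chain, unmaps those BDs, frees the skb and returns the new bd consumer.
 */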
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif
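/* Slowpath (ramrod) completions arrive on the regular Rx completion queue
 * and are routed here from bnx2x_rx_int().  bnx2x_sp_event() advances the
 * fastpath or global driver state machine according to which command
 * completed and in which state it was issued.
 */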
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
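/* TPA (transparent packet aggregation, the hardware LRO of this device)
 * keeps one skb per aggregation queue in tpa_pool.  On TPA_START the empty
 * pool skb takes the ring slot at the producer while the partially filled
 * ring skb is parked in the pool until the aggregation completes.
 */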
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
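/* bnx2x_fill_frag_skb() walks the SGL of a completed aggregation and
 * attaches the SGE pages to the skb as page fragments, allocating a
 * substitute page for each ring slot it consumes.  It returns 0 on success
 * or an error if a substitute page could not be allocated.
 */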
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
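/* bnx2x_rx_int() is the NAPI Rx handler: it walks the completion queue up
 * to the budget, dispatches slowpath completions to bnx2x_sp_event(),
 * handles TPA start/stop CQEs, copies small packets into fresh skbs when
 * running with a large MTU, and finally publishes the new producers to the
 * chip via bnx2x_update_rx_prod().
 */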
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
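/* Per-queue MSI-X handler: the vector identifies the fastpath directly, so
 * the ISR only disables further interrupts for that status block and hands
 * the queue to NAPI.
 */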
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
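/* The MDIO bus towards a dual-port PHY is shared between the two ports, so
 * PHY accesses are serialized with a software mutex and, when the board
 * requires it, with the MDIO hardware lock as well.
 */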
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
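/* bnx2x_calc_fc_adv() translates the flow-control mode negotiated by the
 * link code (bp->link_vars.ieee_fc) into the ethtool advertising bits
 * (ADVERTISED_Pause/ADVERTISED_Asym_Pause) reported to user space.
 */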
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2270 static void bnx2x_link_set(struct bnx2x *bp)
2272 if (!BP_NOMCP(bp)) {
2273 bnx2x_acquire_phy_lock(bp);
2274 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2275 bnx2x_release_phy_lock(bp);
2277 bnx2x_calc_fc_adv(bp);
2279 BNX2X_ERR("Bootcode is missing - can not set link\n");
2282 static void bnx2x__link_reset(struct bnx2x *bp)
2284 if (!BP_NOMCP(bp)) {
2285 bnx2x_acquire_phy_lock(bp);
2286 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2287 bnx2x_release_phy_lock(bp);
2289 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2292 static u8 bnx2x_link_test(struct bnx2x *bp)
2296 bnx2x_acquire_phy_lock(bp);
2297 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2298 bnx2x_release_phy_lock(bp);
2303 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2305 u32 r_param = bp->link_vars.line_speed / 8;
2306 u32 fair_periodic_timeout_usec;
2309 memset(&(bp->cmng.rs_vars), 0,
2310 sizeof(struct rate_shaping_vars_per_port));
2311 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2313 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2314 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2316 /* this is the threshold below which no timer arming will occur;
2317 the 1.25 coefficient makes the threshold a little bigger than
2318 the real time, to compensate for timer inaccuracy */
2319 bp->cmng.rs_vars.rs_threshold =
2320 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
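/* Worked example (illustrative, using the 100 usec period noted above):
   at 10G, r_param = 10000/8 = 1250 bytes/usec, so the threshold is
   100 * 1250 * 5/4 = 156250 bytes */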
2322 /* resolution of fairness timer */
2323 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2324 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2325 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2327 /* this is the threshold below which we won't arm the timer anymore */
2328 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2330 /* we multiply by 1e3/8 to get bytes/msec.
2331 We don't want the credits to exceed a credit
2332 of t_fair * FAIR_MEM (the algorithm resolution) */
2333 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2334 /* since each tick is 4 usec */
2335 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
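/* e.g. a fairness resolution of 1000 usec becomes 1000/4 = 250 SDM ticks */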
2338 /* Calculates the sum of vn_min_rates.
2339 It's needed for further normalizing of the min_rates.
2340 Returns:
2341 sum of vn_min_rates.
2342 or
2343 0 - if all the min_rates are 0.
2344 In the latter case the fairness algorithm should be deactivated.
2345 If not all min_rates are zero then those that are zeroes will be set to 1.
2346 */
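/* Illustrative example: raw min BW fields of 10, 20, 30 and 40 scale
   (by the * 100 in the loop below) to vn_min_rates of 1000, 2000, 3000
   and 4000, giving vn_weight_sum = 10000 */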
2347 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2350 int port = BP_PORT(bp);
2353 bp->vn_weight_sum = 0;
2354 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2355 int func = 2*vn + port;
2356 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2357 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2358 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2360 /* Skip hidden vns */
2361 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2364 /* If min rate is zero - set it to 1 */
2366 vn_min_rate = DEF_MIN_RATE;
2370 bp->vn_weight_sum += vn_min_rate;
2373 /* ... only if all min rates are zeros - disable fairness */
2375 bp->cmng.flags.cmng_enables &=
2376 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2377 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2378 " fairness will be disabled\n");
2380 bp->cmng.flags.cmng_enables |=
2381 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2384 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2386 struct rate_shaping_vars_per_vn m_rs_vn;
2387 struct fairness_vars_per_vn m_fair_vn;
2388 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2389 u16 vn_min_rate, vn_max_rate;
2392 /* If function is hidden - set min and max to zeroes */
2393 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2398 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2399 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2400 /* If min rate is zero - set it to 1 */
2402 vn_min_rate = DEF_MIN_RATE;
2403 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2404 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2407 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2408 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2410 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2411 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2413 /* global vn counter - maximal Mbps for this vn */
2414 m_rs_vn.vn_counter.rate = vn_max_rate;
2416 /* quota - number of bytes transmitted in this period */
2417 m_rs_vn.vn_counter.quota =
2418 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
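/* e.g. (illustrative) vn_max_rate = 10000 Mbps over a 100 usec period
   gives a quota of 10000 * 100 / 8 = 125000 bytes */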
2420 if (bp->vn_weight_sum) {
2421 /* credit for each period of the fairness algorithm:
2422 number of bytes in T_FAIR (the vn share the port rate).
2423 vn_weight_sum should not be larger than 10000, thus
2424 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2425 than zero */
2426 m_fair_vn.vn_credit_delta =
2427 max((u32)(vn_min_rate * (T_FAIR_COEF /
2428 (8 * bp->vn_weight_sum))),
2429 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
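/* Illustrative example, assuming T_FAIR_COEF = 1e7 (consistent with the
   1000 usec at 10G noted in bnx2x_init_port_minmax): with
   vn_weight_sum = 10000, a vn_min_rate of 2500 earns
   max(2500 * (1e7 / 80000), 2 * fair_threshold) =
   max(312500, 2 * fair_threshold) bytes of credit per T_FAIR period */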
2430 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2431 m_fair_vn.vn_credit_delta);
2434 /* Store it to internal memory */
2435 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2436 REG_WR(bp, BAR_XSTRORM_INTMEM +
2437 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2438 ((u32 *)(&m_rs_vn))[i]);
2440 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2441 REG_WR(bp, BAR_XSTRORM_INTMEM +
2442 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2443 ((u32 *)(&m_fair_vn))[i]);
2447 /* This function is called upon link interrupt */
2448 static void bnx2x_link_attn(struct bnx2x *bp)
2450 /* Make sure that we are synced with the current statistics */
2451 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2453 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2455 if (bp->link_vars.link_up) {
2457 /* dropless flow control */
2458 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2459 int port = BP_PORT(bp);
2460 u32 pause_enabled = 0;
2462 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2465 REG_WR(bp, BAR_USTRORM_INTMEM +
2466 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2470 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2471 struct host_port_stats *pstats;
2473 pstats = bnx2x_sp(bp, port_stats);
2474 /* reset old bmac stats */
2475 memset(&(pstats->mac_stx[0]), 0,
2476 sizeof(struct mac_stx));
2478 if (bp->state == BNX2X_STATE_OPEN)
2479 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2482 /* indicate link status */
2483 bnx2x_link_report(bp);
2486 int port = BP_PORT(bp);
2490 /* Set the attention towards other drivers on the same port */
2491 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2492 if (vn == BP_E1HVN(bp))
2495 func = ((vn << 1) | port);
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2497 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2500 if (bp->link_vars.link_up) {
2503 /* Init rate shaping and fairness contexts */
2504 bnx2x_init_port_minmax(bp);
2506 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2507 bnx2x_init_vn_minmax(bp, 2*vn + port);
2509 /* Store it to internal memory */
2511 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2512 REG_WR(bp, BAR_XSTRORM_INTMEM +
2513 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2514 ((u32 *)(&bp->cmng))[i]);
2519 static void bnx2x__link_status_update(struct bnx2x *bp)
2521 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2524 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2526 if (bp->link_vars.link_up)
2527 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2529 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2531 bnx2x_calc_vn_weight_sum(bp);
2533 /* indicate link status */
2534 bnx2x_link_report(bp);
2537 static void bnx2x_pmf_update(struct bnx2x *bp)
2539 int port = BP_PORT(bp);
2543 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2545 /* enable nig attention */
2546 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2547 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2548 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2550 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2558 * General service functions
2561 /* send the MCP a request, block until there is a reply */
2562 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2564 int func = BP_FUNC(bp);
2565 u32 seq = ++bp->fw_seq;
2568 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2570 mutex_lock(&bp->fw_mb_mutex);
2571 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2572 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2575 /* let the FW do its magic ... */
2578 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2580 /* Give the FW up to 5 seconds (500*10ms) */
2581 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2583 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2584 cnt*delay, rc, seq);
2586 /* is this a reply to our command? */
2587 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2588 rc &= FW_MSG_CODE_MASK;
2591 BNX2X_ERR("FW failed to respond!\n");
2595 mutex_unlock(&bp->fw_mb_mutex);
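/* The exchange above is a simple sequence-number handshake: the driver
   writes (command | seq) to its mailbox and polls the FW mailbox until
   the FW echoes the same sequence number back; the remaining bits of
   the echoed word (FW_MSG_CODE_MASK) carry the response code */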
2600 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2601 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2602 static void bnx2x_set_rx_mode(struct net_device *dev);
2604 static void bnx2x_e1h_disable(struct bnx2x *bp)
2606 int port = BP_PORT(bp);
2608 netif_tx_disable(bp->dev);
2610 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2612 netif_carrier_off(bp->dev);
2615 static void bnx2x_e1h_enable(struct bnx2x *bp)
2617 int port = BP_PORT(bp);
2619 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2621 /* Tx queues should only be re-enabled */
2622 netif_tx_wake_all_queues(bp->dev);
2624 /*
2625 * Should not call netif_carrier_on since it will be called if the link
2626 * is up when checking for link state
2627 */
2630 static void bnx2x_update_min_max(struct bnx2x *bp)
2632 int port = BP_PORT(bp);
2635 /* Init rate shaping and fairness contexts */
2636 bnx2x_init_port_minmax(bp);
2638 bnx2x_calc_vn_weight_sum(bp);
2640 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2641 bnx2x_init_vn_minmax(bp, 2*vn + port);
2646 /* Set the attention towards other drivers on the same port */
2647 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2648 if (vn == BP_E1HVN(bp))
2651 func = ((vn << 1) | port);
2652 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2653 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2656 /* Store it to internal memory */
2657 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2658 REG_WR(bp, BAR_XSTRORM_INTMEM +
2659 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2660 ((u32 *)(&bp->cmng))[i]);
2664 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2666 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2668 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2670 /*
2671 * This is the only place besides the function initialization
2672 * where the bp->flags can change so it is done without any
2673 * locks
2674 */
2675 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2676 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2677 bp->flags |= MF_FUNC_DIS;
2679 bnx2x_e1h_disable(bp);
2681 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2682 bp->flags &= ~MF_FUNC_DIS;
2684 bnx2x_e1h_enable(bp);
2686 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2688 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2690 bnx2x_update_min_max(bp);
2691 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2694 /* Report results to MCP */
2696 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2698 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2701 /* must be called under the spq lock */
2702 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2704 struct eth_spe *next_spe = bp->spq_prod_bd;
2706 if (bp->spq_prod_bd == bp->spq_last_bd) {
2707 bp->spq_prod_bd = bp->spq;
2708 bp->spq_prod_idx = 0;
2709 DP(NETIF_MSG_TIMER, "end of spq\n");
2717 /* must be called under the spq lock */
2718 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2720 int func = BP_FUNC(bp);
2722 /* Make sure that BD data is updated before writing the producer */
2725 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2730 /* the slow path queue is odd since completions arrive on the fastpath ring */
2731 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2732 u32 data_hi, u32 data_lo, int common)
2734 struct eth_spe *spe;
2736 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2737 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2738 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2739 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2740 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2742 #ifdef BNX2X_STOP_ON_ERROR
2743 if (unlikely(bp->panic))
2747 spin_lock_bh(&bp->spq_lock);
2749 if (!bp->spq_left) {
2750 BNX2X_ERR("BUG! SPQ ring full!\n");
2751 spin_unlock_bh(&bp->spq_lock);
2756 spe = bnx2x_sp_get_next(bp);
2758 /* CID needs port number to be encoded in it */
2759 spe->hdr.conn_and_cmd_data =
2760 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2762 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2765 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2767 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2768 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2772 bnx2x_sp_prod_update(bp);
2773 spin_unlock_bh(&bp->spq_lock);
2777 /* acquire split MCP access lock register */
2778 static int bnx2x_acquire_alr(struct bnx2x *bp)
2784 for (j = 0; j < 1000; j++) {
2786 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2787 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2788 if (val & (1L << 31))
2793 if (!(val & (1L << 31))) {
2794 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2801 /* release split MCP access lock register */
2802 static void bnx2x_release_alr(struct bnx2x *bp)
2804 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2807 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2809 struct host_def_status_block *def_sb = bp->def_status_blk;
2812 barrier(); /* status block is written to by the chip */
2813 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2814 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2817 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2818 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2821 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2822 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2825 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2826 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2829 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2830 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2837 * slow path service functions
2840 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2842 int port = BP_PORT(bp);
2843 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2844 COMMAND_REG_ATTN_BITS_SET);
2845 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2846 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2847 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2848 NIG_REG_MASK_INTERRUPT_PORT0;
2852 if (bp->attn_state & asserted)
2853 BNX2X_ERR("IGU ERROR\n");
2855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2856 aeu_mask = REG_RD(bp, aeu_addr);
2858 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2859 aeu_mask, asserted);
2860 aeu_mask &= ~(asserted & 0x3ff);
2861 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2863 REG_WR(bp, aeu_addr, aeu_mask);
2864 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2866 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2867 bp->attn_state |= asserted;
2868 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2870 if (asserted & ATTN_HARD_WIRED_MASK) {
2871 if (asserted & ATTN_NIG_FOR_FUNC) {
2873 bnx2x_acquire_phy_lock(bp);
2875 /* save nig interrupt mask */
2876 nig_mask = REG_RD(bp, nig_int_mask_addr);
2877 REG_WR(bp, nig_int_mask_addr, 0);
2879 bnx2x_link_attn(bp);
2881 /* handle unicore attn? */
2883 if (asserted & ATTN_SW_TIMER_4_FUNC)
2884 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2886 if (asserted & GPIO_2_FUNC)
2887 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2889 if (asserted & GPIO_3_FUNC)
2890 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2892 if (asserted & GPIO_4_FUNC)
2893 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2896 if (asserted & ATTN_GENERAL_ATTN_1) {
2897 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2898 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2900 if (asserted & ATTN_GENERAL_ATTN_2) {
2901 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2902 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2904 if (asserted & ATTN_GENERAL_ATTN_3) {
2905 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2906 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2909 if (asserted & ATTN_GENERAL_ATTN_4) {
2910 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2911 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2913 if (asserted & ATTN_GENERAL_ATTN_5) {
2914 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2915 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2917 if (asserted & ATTN_GENERAL_ATTN_6) {
2918 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2919 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2923 } /* if hardwired */
2925 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2927 REG_WR(bp, hc_addr, asserted);
2929 /* now set back the mask */
2930 if (asserted & ATTN_NIG_FOR_FUNC) {
2931 REG_WR(bp, nig_int_mask_addr, nig_mask);
2932 bnx2x_release_phy_lock(bp);
2936 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2938 int port = BP_PORT(bp);
2940 /* mark the failure */
2941 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2942 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2943 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2944 bp->link_params.ext_phy_config);
2946 /* log the failure */
2947 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2948 "Please contact Dell Support for assistance.\n");
2951 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2953 int port = BP_PORT(bp);
2955 u32 val, swap_val, swap_override;
2957 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2958 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2960 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2962 val = REG_RD(bp, reg_offset);
2963 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2964 REG_WR(bp, reg_offset, val);
2966 BNX2X_ERR("SPIO5 hw attention\n");
2968 /* Fan failure attention */
2969 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2970 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2971 /* Low power mode is controlled by GPIO 2 */
2972 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2973 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2974 /* The PHY reset is controlled by GPIO 1 */
2975 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2976 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2979 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2980 /* The PHY reset is controlled by GPIO 1 */
2981 /* fake the port number to cancel the swap done in
2982 set_gpio() */
2983 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2984 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2985 port = (swap_val && swap_override) ^ 1;
2986 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2987 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993 bnx2x_fan_failure(bp);
2996 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2997 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2998 bnx2x_acquire_phy_lock(bp);
2999 bnx2x_handle_module_detect_int(&bp->link_params);
3000 bnx2x_release_phy_lock(bp);
3003 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3005 val = REG_RD(bp, reg_offset);
3006 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3007 REG_WR(bp, reg_offset, val);
3009 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3010 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3015 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3019 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3021 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3022 BNX2X_ERR("DB hw attention 0x%x\n", val);
3023 /* DORQ discard attention */
3025 BNX2X_ERR("FATAL error from DORQ\n");
3028 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3030 int port = BP_PORT(bp);
3033 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3034 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3036 val = REG_RD(bp, reg_offset);
3037 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3038 REG_WR(bp, reg_offset, val);
3040 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3041 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3046 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3050 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3052 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3053 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3054 /* CFC error attention */
3056 BNX2X_ERR("FATAL error from CFC\n");
3059 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3061 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3062 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3063 /* RQ_USDMDP_FIFO_OVERFLOW */
3065 BNX2X_ERR("FATAL error from PXP\n");
3068 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3070 int port = BP_PORT(bp);
3073 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3074 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3076 val = REG_RD(bp, reg_offset);
3077 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3078 REG_WR(bp, reg_offset, val);
3080 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3081 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3086 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3090 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3092 if (attn & BNX2X_PMF_LINK_ASSERT) {
3093 int func = BP_FUNC(bp);
3095 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3096 bp->mf_config = SHMEM_RD(bp,
3097 mf_cfg.func_mf_config[func].config);
3098 val = SHMEM_RD(bp, func_mb[func].drv_status);
3099 if (val & DRV_STATUS_DCC_EVENT_MASK)
3101 (val & DRV_STATUS_DCC_EVENT_MASK));
3102 bnx2x__link_status_update(bp);
3103 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3104 bnx2x_pmf_update(bp);
3106 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3108 BNX2X_ERR("MC assert!\n");
3109 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3110 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3111 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3112 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3115 } else if (attn & BNX2X_MCP_ASSERT) {
3117 BNX2X_ERR("MCP assert!\n");
3118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3122 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3125 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3126 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3127 if (attn & BNX2X_GRC_TIMEOUT) {
3128 val = CHIP_IS_E1H(bp) ?
3129 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3130 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3132 if (attn & BNX2X_GRC_RSV) {
3133 val = CHIP_IS_E1H(bp) ?
3134 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3135 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3137 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3141 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3142 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3145 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3146 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3147 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3148 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3149 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3150 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
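/* Sketch of the layout implied by the masks above:
   bits [15:0]  - per-chip load counter (incremented on load,
                  decremented on unload)
   bits [31:16] - non-zero while a reset/recovery is in progress */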
3152 * should be run under rtnl lock
3154 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3156 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3157 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3158 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3164 * should be run under rtnl lock
3166 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3176 * should be run under rtnl lock
3178 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3182 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3186 * should be run under rtnl lock
3188 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3190 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3192 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3194 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3195 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3201 * should be run under rtnl lock
3203 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3205 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3207 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3209 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3210 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3218 * should be run under rtnl lock
3220 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3222 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3225 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3227 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3231 static inline void _print_next_block(int idx, const char *blk)
3238 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3242 for (i = 0; sig; i++) {
3243 cur_bit = ((u32)0x1 << i);
3244 if (sig & cur_bit) {
3246 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3247 _print_next_block(par_num++, "BRB");
3249 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3250 _print_next_block(par_num++, "PARSER");
3252 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3253 _print_next_block(par_num++, "TSDM");
3255 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3256 _print_next_block(par_num++, "SEARCHER");
3258 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3259 _print_next_block(par_num++, "TSEMI");
3271 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3280 _print_next_block(par_num++, "PBCLIENT");
3282 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3283 _print_next_block(par_num++, "QM");
3285 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSDM");
3288 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3289 _print_next_block(par_num++, "XSEMI");
3291 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3292 _print_next_block(par_num++, "DOORBELLQ");
3294 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3295 _print_next_block(par_num++, "VAUX PCI CORE");
3297 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3298 _print_next_block(par_num++, "DEBUG");
3300 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3301 _print_next_block(par_num++, "USDM");
3303 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3304 _print_next_block(par_num++, "USEMI");
3306 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3307 _print_next_block(par_num++, "UPB");
3309 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3310 _print_next_block(par_num++, "CSDM");
3322 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3326 for (i = 0; sig; i++) {
3327 cur_bit = ((u32)0x1 << i);
3328 if (sig & cur_bit) {
3330 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3331 _print_next_block(par_num++, "CSEMI");
3333 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3334 _print_next_block(par_num++, "PXP");
3336 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3337 _print_next_block(par_num++,
3338 "PXPPCICLOCKCLIENT");
3340 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3341 _print_next_block(par_num++, "CFC");
3343 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3344 _print_next_block(par_num++, "CDU");
3346 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3347 _print_next_block(par_num++, "IGU");
3349 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3350 _print_next_block(par_num++, "MISC");
3362 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3366 for (i = 0; sig; i++) {
3367 cur_bit = ((u32)0x1 << i);
3368 if (sig & cur_bit) {
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3371 _print_next_block(par_num++, "MCP ROM");
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP RX");
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3377 _print_next_block(par_num++, "MCP UMP TX");
3379 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3380 _print_next_block(par_num++, "MCP SCPAD");
3392 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3395 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3396 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3398 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3399 "[0]:0x%08x [1]:0x%08x "
3400 "[2]:0x%08x [3]:0x%08x\n",
3401 sig0 & HW_PRTY_ASSERT_SET_0,
3402 sig1 & HW_PRTY_ASSERT_SET_1,
3403 sig2 & HW_PRTY_ASSERT_SET_2,
3404 sig3 & HW_PRTY_ASSERT_SET_3);
3405 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3407 par_num = bnx2x_print_blocks_with_parity0(
3408 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3409 par_num = bnx2x_print_blocks_with_parity1(
3410 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3411 par_num = bnx2x_print_blocks_with_parity2(
3412 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3413 par_num = bnx2x_print_blocks_with_parity3(
3414 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3421 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3423 struct attn_route attn;
3424 int port = BP_PORT(bp);
3426 attn.sig[0] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3429 attn.sig[1] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3432 attn.sig[2] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3435 attn.sig[3] = REG_RD(bp,
3436 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3439 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3443 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3445 struct attn_route attn, *group_mask;
3446 int port = BP_PORT(bp);
3452 /* need to take HW lock because MCP or other port might also
3453 try to handle this event */
3454 bnx2x_acquire_alr(bp);
3456 if (bnx2x_chk_parity_attn(bp)) {
3457 bp->recovery_state = BNX2X_RECOVERY_INIT;
3458 bnx2x_set_reset_in_progress(bp);
3459 schedule_delayed_work(&bp->reset_task, 0);
3460 /* Disable HW interrupts */
3461 bnx2x_int_disable(bp);
3462 bnx2x_release_alr(bp);
3463 /* In case of parity errors don't handle attentions so that
3464 * other functions would "see" parity errors.
3465 */
3469 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3470 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3471 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3472 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3473 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3474 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3476 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3477 if (deasserted & (1 << index)) {
3478 group_mask = &bp->attn_group[index];
3480 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3481 index, group_mask->sig[0], group_mask->sig[1],
3482 group_mask->sig[2], group_mask->sig[3]);
3484 bnx2x_attn_int_deasserted3(bp,
3485 attn.sig[3] & group_mask->sig[3]);
3486 bnx2x_attn_int_deasserted1(bp,
3487 attn.sig[1] & group_mask->sig[1]);
3488 bnx2x_attn_int_deasserted2(bp,
3489 attn.sig[2] & group_mask->sig[2]);
3490 bnx2x_attn_int_deasserted0(bp,
3491 attn.sig[0] & group_mask->sig[0]);
3495 bnx2x_release_alr(bp);
3497 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3500 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3502 REG_WR(bp, reg_addr, val);
3504 if (~bp->attn_state & deasserted)
3505 BNX2X_ERR("IGU ERROR\n");
3507 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3508 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3510 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3511 aeu_mask = REG_RD(bp, reg_addr);
3513 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3514 aeu_mask, deasserted);
3515 aeu_mask |= (deasserted & 0x3ff);
3516 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3518 REG_WR(bp, reg_addr, aeu_mask);
3519 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3521 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3522 bp->attn_state &= ~deasserted;
3523 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3526 static void bnx2x_attn_int(struct bnx2x *bp)
3528 /* read local copy of bits */
3529 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3531 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3533 u32 attn_state = bp->attn_state;
3535 /* look for changed bits */
3536 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3537 u32 deasserted = ~attn_bits & attn_ack & attn_state;
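/* Illustrative: with attn_bits = 0x5, attn_ack = 0x1 and
   attn_state = 0x1, bit 2 is newly asserted (raised but not yet
   acknowledged) and nothing is deasserted */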
3540 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3541 attn_bits, attn_ack, asserted, deasserted);
3543 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3544 BNX2X_ERR("BAD attention state\n");
3546 /* handle bits that were raised */
3548 bnx2x_attn_int_asserted(bp, asserted);
3551 bnx2x_attn_int_deasserted(bp, deasserted);
3554 static void bnx2x_sp_task(struct work_struct *work)
3556 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3560 /* Return here if interrupt is disabled */
3561 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3562 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3566 status = bnx2x_update_dsb_idx(bp);
3567 /* if (status == 0) */
3568 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3570 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3576 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3578 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3580 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3582 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3584 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3589 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3591 struct net_device *dev = dev_instance;
3592 struct bnx2x *bp = netdev_priv(dev);
3594 /* Return here if interrupt is disabled */
3595 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3596 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3600 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3602 #ifdef BNX2X_STOP_ON_ERROR
3603 if (unlikely(bp->panic))
3609 struct cnic_ops *c_ops;
3612 c_ops = rcu_dereference(bp->cnic_ops);
3614 c_ops->cnic_handler(bp->cnic_data, NULL);
3618 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3623 /* end of slow path */
3627 /****************************************************************************
3628 * Macros
3629 ****************************************************************************/
3631 /* sum[hi:lo] += add[hi:lo] */
3632 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3633 do { \
3634 s_lo += a_lo; \
3635 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3636 } while (0)
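/* e.g. adding a_hi:a_lo = 0x0:0x1 to s_hi:s_lo = 0x0:0xffffffff wraps
   s_lo to 0; the (s_lo < a_lo) test supplies the carry, giving 0x1:0x0 */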
3638 /* difference = minuend - subtrahend */
3639 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3640 do { \
3641 if (m_lo < s_lo) { \
3642 /* underflow */ \
3643 d_hi = m_hi - s_hi; \
3644 if (d_hi > 0) { \
3645 /* we can 'loan' 1 */ \
3646 d_hi--; \
3647 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3648 } else { \
3649 /* m_hi <= s_hi */ \
3650 d_hi = 0; \
3651 d_lo = 0; \
3652 } \
3653 } else { \
3654 /* m_lo >= s_lo */ \
3655 if (m_hi < s_hi) { \
3656 d_hi = 0; \
3657 d_lo = 0; \
3658 } else { \
3659 /* m_hi >= s_hi */ \
3660 d_hi = m_hi - s_hi; \
3661 d_lo = m_lo - s_lo; \
3662 } \
3663 } \
3664 } while (0)
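/* e.g. 0x1:0x0 minus 0x0:0x1: m_lo < s_lo, so 1 is "loaned" from the
   high word, yielding d_hi = 0x0, d_lo = 0xffffffff */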
3666 #define UPDATE_STAT64(s, t) \
3667 do { \
3668 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3669 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3670 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3671 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3672 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3673 pstats->mac_stx[1].t##_lo, diff.lo); \
3674 } while (0)
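/* In the macro above, mac_stx[0] holds the last raw HW snapshot (used
   only to compute the delta) while mac_stx[1] accumulates those deltas
   into the running totals */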
3676 #define UPDATE_STAT64_NIG(s, t) \
3677 do { \
3678 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3679 diff.lo, new->s##_lo, old->s##_lo); \
3680 ADD_64(estats->t##_hi, diff.hi, \
3681 estats->t##_lo, diff.lo); \
3682 } while (0)
3684 /* sum[hi:lo] += add */
3685 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3686 do { \
3687 s_lo += a; \
3688 s_hi += (s_lo < a) ? 1 : 0; \
3689 } while (0)
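/* e.g. s_hi:s_lo = 0x0:0xfffffffe plus a = 5: s_lo wraps to 3, and
   since 3 < 5 the high word is incremented */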
3691 #define UPDATE_EXTEND_STAT(s) \
3692 do { \
3693 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3694 pstats->mac_stx[1].s##_lo, \
3695 new->s); \
3696 } while (0)
3698 #define UPDATE_EXTEND_TSTAT(s, t) \
3699 do { \
3700 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3701 old_tclient->s = tclient->s; \
3702 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3703 } while (0)
3705 #define UPDATE_EXTEND_USTAT(s, t) \
3706 do { \
3707 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3708 old_uclient->s = uclient->s; \
3709 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3710 } while (0)
3712 #define UPDATE_EXTEND_XSTAT(s, t) \
3713 do { \
3714 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3715 old_xclient->s = xclient->s; \
3716 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3717 } while (0)
3719 /* minuend -= subtrahend */
3720 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3721 do { \
3722 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3723 } while (0)
3725 /* minuend[hi:lo] -= subtrahend */
3726 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3727 do { \
3728 SUB_64(m_hi, 0, m_lo, s); \
3729 } while (0)
3731 #define SUB_EXTEND_USTAT(s, t) \
3732 do { \
3733 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3734 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3735 } while (0)
3738 * General service functions
3741 static inline long bnx2x_hilo(u32 *hiref)
3742 {
3743 u32 lo = *(hiref + 1);
3744 #if (BITS_PER_LONG == 64)
3745 u32 hi = *hiref;
3747 return HILO_U64(hi, lo);
3748 #else
3749 return lo;
3750 #endif
3751 }
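/* The storm statistics lay each 64-bit counter out as a hi dword
   followed by a lo dword (hence *(hiref + 1) above); on a 64-bit
   kernel the two dwords are combined into one value, while on 32-bit
   only the low dword fits in a long */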
3754 * Init service functions
3757 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3759 if (!bp->stats_pending) {
3760 struct eth_query_ramrod_data ramrod_data = {0};
3763 ramrod_data.drv_counter = bp->stats_counter++;
3764 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3765 for_each_queue(bp, i)
3766 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3768 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3769 ((u32 *)&ramrod_data)[1],
3770 ((u32 *)&ramrod_data)[0], 0);
3772 /* stats ramrod has its own slot on the spq */
3774 bp->stats_pending = 1;
3779 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3781 struct dmae_command *dmae = &bp->stats_dmae;
3782 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3784 *stats_comp = DMAE_COMP_VAL;
3785 if (CHIP_REV_IS_SLOW(bp))
3789 if (bp->executer_idx) {
3790 int loader_idx = PMF_DMAE_C(bp);
3792 memset(dmae, 0, sizeof(struct dmae_command));
3794 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3795 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3796 DMAE_CMD_DST_RESET |
3797 #ifdef __BIG_ENDIAN
3798 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3799 #else
3800 DMAE_CMD_ENDIANITY_DW_SWAP |
3801 #endif
3802 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3804 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3805 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3806 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3807 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3808 sizeof(struct dmae_command) *
3809 (loader_idx + 1)) >> 2;
3810 dmae->dst_addr_hi = 0;
3811 dmae->len = sizeof(struct dmae_command) >> 2;
3814 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3815 dmae->comp_addr_hi = 0;
3819 bnx2x_post_dmae(bp, dmae, loader_idx);
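/* The command built above acts as a "loader": it DMAEs the
   already-prepared command array into the DMAE command memory, and its
   completion write lands on the next channel's GO register (see
   comp_addr_lo above), which starts execution of the copied commands */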
3821 } else if (bp->func_stx) {
3823 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3827 static int bnx2x_stats_comp(struct bnx2x *bp)
3829 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3833 while (*stats_comp != DMAE_COMP_VAL) {
3835 BNX2X_ERR("timeout waiting for stats finished\n");
3845 * Statistics service functions
3848 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3850 struct dmae_command *dmae;
3852 int loader_idx = PMF_DMAE_C(bp);
3853 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3856 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3857 BNX2X_ERR("BUG!\n");
3861 bp->executer_idx = 0;
3863 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3865 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3866 #ifdef __BIG_ENDIAN
3867 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3868 #else
3869 DMAE_CMD_ENDIANITY_DW_SWAP |
3870 #endif
3871 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3872 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3875 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3876 dmae->src_addr_lo = bp->port.port_stx >> 2;
3877 dmae->src_addr_hi = 0;
3878 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3879 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3880 dmae->len = DMAE_LEN32_RD_MAX;
3881 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882 dmae->comp_addr_hi = 0;
3885 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3886 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3887 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3888 dmae->src_addr_hi = 0;
3889 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3890 DMAE_LEN32_RD_MAX * 4);
3891 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3892 DMAE_LEN32_RD_MAX * 4);
3893 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3894 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3895 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3896 dmae->comp_val = DMAE_COMP_VAL;
3899 bnx2x_hw_stats_post(bp);
3900 bnx2x_stats_comp(bp);
3903 static void bnx2x_port_stats_init(struct bnx2x *bp)
3905 struct dmae_command *dmae;
3906 int port = BP_PORT(bp);
3907 int vn = BP_E1HVN(bp);
3909 int loader_idx = PMF_DMAE_C(bp);
3911 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3914 if (!bp->link_vars.link_up || !bp->port.pmf) {
3915 BNX2X_ERR("BUG!\n");
3919 bp->executer_idx = 0;
3922 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3923 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3924 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3925 #ifdef __BIG_ENDIAN
3926 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3927 #else
3928 DMAE_CMD_ENDIANITY_DW_SWAP |
3929 #endif
3930 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3931 (vn << DMAE_CMD_E1HVN_SHIFT));
3933 if (bp->port.port_stx) {
3935 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3936 dmae->opcode = opcode;
3937 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3938 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3939 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3940 dmae->dst_addr_hi = 0;
3941 dmae->len = sizeof(struct host_port_stats) >> 2;
3942 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3943 dmae->comp_addr_hi = 0;
3949 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3950 dmae->opcode = opcode;
3951 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3952 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3953 dmae->dst_addr_lo = bp->func_stx >> 2;
3954 dmae->dst_addr_hi = 0;
3955 dmae->len = sizeof(struct host_func_stats) >> 2;
3956 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3957 dmae->comp_addr_hi = 0;
3962 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3963 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3964 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3965 #ifdef __BIG_ENDIAN
3966 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3967 #else
3968 DMAE_CMD_ENDIANITY_DW_SWAP |
3969 #endif
3970 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3971 (vn << DMAE_CMD_E1HVN_SHIFT));
3973 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3975 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3976 NIG_REG_INGRESS_BMAC0_MEM);
3978 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3979 BIGMAC_REGISTER_TX_STAT_GTBYT */
3980 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3981 dmae->opcode = opcode;
3982 dmae->src_addr_lo = (mac_addr +
3983 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3984 dmae->src_addr_hi = 0;
3985 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3986 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3987 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3988 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3989 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3990 dmae->comp_addr_hi = 0;
3993 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3994 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3995 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3996 dmae->opcode = opcode;
3997 dmae->src_addr_lo = (mac_addr +
3998 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3999 dmae->src_addr_hi = 0;
4000 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4001 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4002 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4003 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4004 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4005 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4006 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4007 dmae->comp_addr_hi = 0;
4010 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4012 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4014 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4015 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4016 dmae->opcode = opcode;
4017 dmae->src_addr_lo = (mac_addr +
4018 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4019 dmae->src_addr_hi = 0;
4020 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4021 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4022 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4023 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4024 dmae->comp_addr_hi = 0;
4027 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4028 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4029 dmae->opcode = opcode;
4030 dmae->src_addr_lo = (mac_addr +
4031 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4032 dmae->src_addr_hi = 0;
4033 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4034 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4035 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4036 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4038 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4039 dmae->comp_addr_hi = 0;
4042 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4043 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4044 dmae->opcode = opcode;
4045 dmae->src_addr_lo = (mac_addr +
4046 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4047 dmae->src_addr_hi = 0;
4048 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4049 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4050 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4051 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4052 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4053 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4054 dmae->comp_addr_hi = 0;
4059 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4060 dmae->opcode = opcode;
4061 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4062 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4063 dmae->src_addr_hi = 0;
4064 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4065 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4066 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4067 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068 dmae->comp_addr_hi = 0;
4071 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4072 dmae->opcode = opcode;
4073 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4074 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4075 dmae->src_addr_hi = 0;
4076 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4077 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4078 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4079 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4080 dmae->len = (2*sizeof(u32)) >> 2;
4081 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4082 dmae->comp_addr_hi = 0;
4085 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4086 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4087 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4088 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4089 #ifdef __BIG_ENDIAN
4090 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4091 #else
4092 DMAE_CMD_ENDIANITY_DW_SWAP |
4093 #endif
4094 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4095 (vn << DMAE_CMD_E1HVN_SHIFT));
4096 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4097 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4098 dmae->src_addr_hi = 0;
4099 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4100 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4101 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4102 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4103 dmae->len = (2*sizeof(u32)) >> 2;
4104 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4105 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4106 dmae->comp_val = DMAE_COMP_VAL;
4111 static void bnx2x_func_stats_init(struct bnx2x *bp)
4113 struct dmae_command *dmae = &bp->stats_dmae;
4114 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4117 if (!bp->func_stx) {
4118 BNX2X_ERR("BUG!\n");
4122 bp->executer_idx = 0;
4123 memset(dmae, 0, sizeof(struct dmae_command));
4125 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4126 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4127 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4128 #ifdef __BIG_ENDIAN
4129 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4130 #else
4131 DMAE_CMD_ENDIANITY_DW_SWAP |
4132 #endif
4133 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4134 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4135 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4136 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4137 dmae->dst_addr_lo = bp->func_stx >> 2;
4138 dmae->dst_addr_hi = 0;
4139 dmae->len = sizeof(struct host_func_stats) >> 2;
4140 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4141 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4142 dmae->comp_val = DMAE_COMP_VAL;
4147 static void bnx2x_stats_start(struct bnx2x *bp)
4150 bnx2x_port_stats_init(bp);
4152 else if (bp->func_stx)
4153 bnx2x_func_stats_init(bp);
4155 bnx2x_hw_stats_post(bp);
4156 bnx2x_storm_stats_post(bp);
4159 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4161 bnx2x_stats_comp(bp);
4162 bnx2x_stats_pmf_update(bp);
4163 bnx2x_stats_start(bp);
4166 static void bnx2x_stats_restart(struct bnx2x *bp)
4168 bnx2x_stats_comp(bp);
4169 bnx2x_stats_start(bp);
4172 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4174 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4175 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4176 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4182 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4183 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4184 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4185 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4186 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4187 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4188 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4189 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4190 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4191 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4192 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4193 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4194 UPDATE_STAT64(tx_stat_gt127,
4195 tx_stat_etherstatspkts65octetsto127octets);
4196 UPDATE_STAT64(tx_stat_gt255,
4197 tx_stat_etherstatspkts128octetsto255octets);
4198 UPDATE_STAT64(tx_stat_gt511,
4199 tx_stat_etherstatspkts256octetsto511octets);
4200 UPDATE_STAT64(tx_stat_gt1023,
4201 tx_stat_etherstatspkts512octetsto1023octets);
4202 UPDATE_STAT64(tx_stat_gt1518,
4203 tx_stat_etherstatspkts1024octetsto1522octets);
4204 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4205 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4206 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4207 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4208 UPDATE_STAT64(tx_stat_gterr,
4209 tx_stat_dot3statsinternalmactransmiterrors);
4210 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4212 estats->pause_frames_received_hi =
4213 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4214 estats->pause_frames_received_lo =
4215 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4217 estats->pause_frames_sent_hi =
4218 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4219 estats->pause_frames_sent_lo =
4220 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4223 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4225 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4226 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4227 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4229 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4230 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4231 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4232 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4233 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4234 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4235 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4236 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4237 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4238 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4239 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4240 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4241 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4242 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4243 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4244 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4245 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4246 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4247 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4248 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4249 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4250 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4251 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4252 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4253 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4254 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4255 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4256 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4257 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4258 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4259 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4261 estats->pause_frames_received_hi =
4262 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4263 estats->pause_frames_received_lo =
4264 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4265 ADD_64(estats->pause_frames_received_hi,
4266 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4267 estats->pause_frames_received_lo,
4268 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4270 estats->pause_frames_sent_hi =
4271 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4272 estats->pause_frames_sent_lo =
4273 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4274 ADD_64(estats->pause_frames_sent_hi,
4275 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4276 estats->pause_frames_sent_lo,
4277 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4280 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4282 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4283 struct nig_stats *old = &(bp->port.old_nig_stats);
4284 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4285 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4292 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4293 bnx2x_bmac_stats_update(bp);
4295 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4296 bnx2x_emac_stats_update(bp);
4298 else { /* unreached */
4299 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4303 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4304 new->brb_discard - old->brb_discard);
4305 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4306 new->brb_truncate - old->brb_truncate);
4308 UPDATE_STAT64_NIG(egress_mac_pkt0,
4309 etherstatspkts1024octetsto1522octets);
4310 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4312 memcpy(old, new, sizeof(struct nig_stats));
4314 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4315 sizeof(struct mac_stx));
4316 estats->brb_drop_hi = pstats->brb_drop_hi;
4317 estats->brb_drop_lo = pstats->brb_drop_lo;
4319 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
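/* Setting start = ++end leaves the two counters equal, which appears to
   mark the port stats block (they bracket the DMAE-written area) as a
   consistent snapshot for the next reader */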
4321 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4322 if (nig_timer_max != estats->nig_timer_max) {
4323 estats->nig_timer_max = nig_timer_max;
4324 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
4330 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4332 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4333 struct tstorm_per_port_stats *tport =
4334 &stats->tstorm_common.port_statistics;
4335 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4336 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4339 memcpy(&(fstats->total_bytes_received_hi),
4340 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4341 sizeof(struct host_func_stats) - 2*sizeof(u32));
4342 estats->error_bytes_received_hi = 0;
4343 estats->error_bytes_received_lo = 0;
4344 estats->etherstatsoverrsizepkts_hi = 0;
4345 estats->etherstatsoverrsizepkts_lo = 0;
4346 estats->no_buff_discard_hi = 0;
4347 estats->no_buff_discard_lo = 0;
4349 for_each_queue(bp, i) {
4350 struct bnx2x_fastpath *fp = &bp->fp[i];
4351 int cl_id = fp->cl_id;
4352 struct tstorm_per_client_stats *tclient =
4353 &stats->tstorm_common.client_statistics[cl_id];
4354 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4355 struct ustorm_per_client_stats *uclient =
4356 &stats->ustorm_common.client_statistics[cl_id];
4357 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4358 struct xstorm_per_client_stats *xclient =
4359 &stats->xstorm_common.client_statistics[cl_id];
4360 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4361 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4362 u32 diff;
4364 /* are storm stats valid? */
4365 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4366 bp->stats_counter) {
4367 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4368 " xstorm counter (%d) != stats_counter (%d)\n",
4369 i, xclient->stats_counter, bp->stats_counter);
4370 return -1;
4371 }
4372 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4373 bp->stats_counter) {
4374 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4375 " tstorm counter (%d) != stats_counter (%d)\n",
4376 i, tclient->stats_counter, bp->stats_counter);
4377 return -2;
4378 }
4379 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4380 bp->stats_counter) {
4381 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4382 " ustorm counter (%d) != stats_counter (%d)\n",
4383 i, uclient->stats_counter, bp->stats_counter);
4384 return -4;
4385 }
4387 qstats->total_bytes_received_hi =
4388 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4389 qstats->total_bytes_received_lo =
4390 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4392 ADD_64(qstats->total_bytes_received_hi,
4393 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4394 qstats->total_bytes_received_lo,
4395 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4397 ADD_64(qstats->total_bytes_received_hi,
4398 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4399 qstats->total_bytes_received_lo,
4400 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4402 qstats->valid_bytes_received_hi =
4403 qstats->total_bytes_received_hi;
4404 qstats->valid_bytes_received_lo =
4405 qstats->total_bytes_received_lo;
4407 qstats->error_bytes_received_hi =
4408 le32_to_cpu(tclient->rcv_error_bytes.hi);
4409 qstats->error_bytes_received_lo =
4410 le32_to_cpu(tclient->rcv_error_bytes.lo);
4412 ADD_64(qstats->total_bytes_received_hi,
4413 qstats->error_bytes_received_hi,
4414 qstats->total_bytes_received_lo,
4415 qstats->error_bytes_received_lo);
4417 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4418 total_unicast_packets_received);
4419 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4420 total_multicast_packets_received);
4421 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4422 total_broadcast_packets_received);
4423 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4424 etherstatsoverrsizepkts);
4425 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4427 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4428 total_unicast_packets_received);
4429 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4430 total_multicast_packets_received);
4431 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4432 total_broadcast_packets_received);
4433 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4434 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4435 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4437 qstats->total_bytes_transmitted_hi =
4438 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4439 qstats->total_bytes_transmitted_lo =
4440 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4442 ADD_64(qstats->total_bytes_transmitted_hi,
4443 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4444 qstats->total_bytes_transmitted_lo,
4445 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4447 ADD_64(qstats->total_bytes_transmitted_hi,
4448 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4449 qstats->total_bytes_transmitted_lo,
4450 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4452 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4453 total_unicast_packets_transmitted);
4454 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4455 total_multicast_packets_transmitted);
4456 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4457 total_broadcast_packets_transmitted);
4459 old_tclient->checksum_discard = tclient->checksum_discard;
4460 old_tclient->ttl0_discard = tclient->ttl0_discard;
4462 ADD_64(fstats->total_bytes_received_hi,
4463 qstats->total_bytes_received_hi,
4464 fstats->total_bytes_received_lo,
4465 qstats->total_bytes_received_lo);
4466 ADD_64(fstats->total_bytes_transmitted_hi,
4467 qstats->total_bytes_transmitted_hi,
4468 fstats->total_bytes_transmitted_lo,
4469 qstats->total_bytes_transmitted_lo);
4470 ADD_64(fstats->total_unicast_packets_received_hi,
4471 qstats->total_unicast_packets_received_hi,
4472 fstats->total_unicast_packets_received_lo,
4473 qstats->total_unicast_packets_received_lo);
4474 ADD_64(fstats->total_multicast_packets_received_hi,
4475 qstats->total_multicast_packets_received_hi,
4476 fstats->total_multicast_packets_received_lo,
4477 qstats->total_multicast_packets_received_lo);
4478 ADD_64(fstats->total_broadcast_packets_received_hi,
4479 qstats->total_broadcast_packets_received_hi,
4480 fstats->total_broadcast_packets_received_lo,
4481 qstats->total_broadcast_packets_received_lo);
4482 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4483 qstats->total_unicast_packets_transmitted_hi,
4484 fstats->total_unicast_packets_transmitted_lo,
4485 qstats->total_unicast_packets_transmitted_lo);
4486 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4487 qstats->total_multicast_packets_transmitted_hi,
4488 fstats->total_multicast_packets_transmitted_lo,
4489 qstats->total_multicast_packets_transmitted_lo);
4490 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4491 qstats->total_broadcast_packets_transmitted_hi,
4492 fstats->total_broadcast_packets_transmitted_lo,
4493 qstats->total_broadcast_packets_transmitted_lo);
4494 ADD_64(fstats->valid_bytes_received_hi,
4495 qstats->valid_bytes_received_hi,
4496 fstats->valid_bytes_received_lo,
4497 qstats->valid_bytes_received_lo);
4499 ADD_64(estats->error_bytes_received_hi,
4500 qstats->error_bytes_received_hi,
4501 estats->error_bytes_received_lo,
4502 qstats->error_bytes_received_lo);
4503 ADD_64(estats->etherstatsoverrsizepkts_hi,
4504 qstats->etherstatsoverrsizepkts_hi,
4505 estats->etherstatsoverrsizepkts_lo,
4506 qstats->etherstatsoverrsizepkts_lo);
4507 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4508 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4509 }
4511 ADD_64(fstats->total_bytes_received_hi,
4512 estats->rx_stat_ifhcinbadoctets_hi,
4513 fstats->total_bytes_received_lo,
4514 estats->rx_stat_ifhcinbadoctets_lo);
4516 memcpy(estats, &(fstats->total_bytes_received_hi),
4517 sizeof(struct host_func_stats) - 2*sizeof(u32));
4519 ADD_64(estats->etherstatsoverrsizepkts_hi,
4520 estats->rx_stat_dot3statsframestoolong_hi,
4521 estats->etherstatsoverrsizepkts_lo,
4522 estats->rx_stat_dot3statsframestoolong_lo);
4523 ADD_64(estats->error_bytes_received_hi,
4524 estats->rx_stat_ifhcinbadoctets_hi,
4525 estats->error_bytes_received_lo,
4526 estats->rx_stat_ifhcinbadoctets_lo);
4528 if (bp->port.pmf) {
4529 estats->mac_filter_discard =
4530 le32_to_cpu(tport->mac_filter_discard);
4531 estats->xxoverflow_discard =
4532 le32_to_cpu(tport->xxoverflow_discard);
4533 estats->brb_truncate_discard =
4534 le32_to_cpu(tport->brb_truncate_discard);
4535 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4536 }
4538 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4540 bp->stats_pending = 0;
4542 return 0;
4543 }
4545 static void bnx2x_net_stats_update(struct bnx2x *bp)
4546 {
4547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4548 struct net_device_stats *nstats = &bp->dev->stats;
4549 int i;
4551 nstats->rx_packets =
4552 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4553 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4554 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4556 nstats->tx_packets =
4557 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4558 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4559 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4561 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4563 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
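/* bnx2x_hilo() (defined earlier in this file) merges a {hi, lo} counter
 * pair into one long: the full 64-bit value on 64-bit hosts, and just
 * the low 32 bits on 32-bit hosts.
 */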
4565 nstats->rx_dropped = estats->mac_discard;
4566 for_each_queue(bp, i)
4567 nstats->rx_dropped +=
4568 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4570 nstats->tx_dropped = 0;
4572 nstats->multicast =
4573 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4575 nstats->collisions =
4576 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4578 nstats->rx_length_errors =
4579 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4580 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4581 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4582 bnx2x_hilo(&estats->brb_truncate_hi);
4583 nstats->rx_crc_errors =
4584 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4585 nstats->rx_frame_errors =
4586 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4587 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4588 nstats->rx_missed_errors = estats->xxoverflow_discard;
4590 nstats->rx_errors = nstats->rx_length_errors +
4591 nstats->rx_over_errors +
4592 nstats->rx_crc_errors +
4593 nstats->rx_frame_errors +
4594 nstats->rx_fifo_errors +
4595 nstats->rx_missed_errors;
4597 nstats->tx_aborted_errors =
4598 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4599 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4600 nstats->tx_carrier_errors =
4601 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4602 nstats->tx_fifo_errors = 0;
4603 nstats->tx_heartbeat_errors = 0;
4604 nstats->tx_window_errors = 0;
4606 nstats->tx_errors = nstats->tx_aborted_errors +
4607 nstats->tx_carrier_errors +
4608 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4609 }
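/* Only counters with a natural net_device_stats equivalent are mapped
 * above; the remaining hardware and per-queue counters stay in
 * bp->eth_stats and are exposed through the ethtool statistics instead.
 */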
4611 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4612 {
4613 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4614 int i;
4616 estats->driver_xoff = 0;
4617 estats->rx_err_discard_pkt = 0;
4618 estats->rx_skb_alloc_failed = 0;
4619 estats->hw_csum_err = 0;
4620 for_each_queue(bp, i) {
4621 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4623 estats->driver_xoff += qstats->driver_xoff;
4624 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4625 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4626 estats->hw_csum_err += qstats->hw_csum_err;
4627 }
4628 }
4630 static void bnx2x_stats_update(struct bnx2x *bp)
4631 {
4632 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4634 if (*stats_comp != DMAE_COMP_VAL)
4635 return;
4637 if (bp->port.pmf)
4638 bnx2x_hw_stats_update(bp);
4640 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4641 BNX2X_ERR("storm stats were not updated for 3 times\n");
4642 bnx2x_panic();
4643 return;
4644 }
4646 bnx2x_net_stats_update(bp);
4647 bnx2x_drv_stats_update(bp);
4649 if (netif_msg_timer(bp)) {
4650 struct bnx2x_fastpath *fp0_rx = bp->fp;
4651 struct bnx2x_fastpath *fp0_tx = bp->fp;
4652 struct tstorm_per_client_stats *old_tclient =
4653 &bp->fp->old_tclient;
4654 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4655 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4656 struct net_device_stats *nstats = &bp->dev->stats;
4657 int i;
4659 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4660 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4661 " tx pkt (%lx)\n",
4662 bnx2x_tx_avail(fp0_tx),
4663 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4664 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4665 " rx pkt (%lx)\n",
4666 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4667 fp0_rx->rx_comp_cons),
4668 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4669 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4670 "brb truncate %u\n",
4671 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4672 qstats->driver_xoff,
4673 estats->brb_drop_lo, estats->brb_truncate_lo);
4674 printk(KERN_DEBUG "tstats: checksum_discard %u "
4675 "packets_too_big_discard %lu no_buff_discard %lu "
4676 "mac_discard %u mac_filter_discard %u "
4677 "xxovrflow_discard %u brb_truncate_discard %u "
4678 "ttl0_discard %u\n",
4679 le32_to_cpu(old_tclient->checksum_discard),
4680 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4681 bnx2x_hilo(&qstats->no_buff_discard_hi),
4682 estats->mac_discard, estats->mac_filter_discard,
4683 estats->xxoverflow_discard, estats->brb_truncate_discard,
4684 le32_to_cpu(old_tclient->ttl0_discard));
4686 for_each_queue(bp, i) {
4687 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4688 bnx2x_fp(bp, i, tx_pkt),
4689 bnx2x_fp(bp, i, rx_pkt),
4690 bnx2x_fp(bp, i, rx_calls));
4691 }
4692 }
4694 bnx2x_hw_stats_post(bp);
4695 bnx2x_storm_stats_post(bp);
4696 }
4698 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4699 {
4700 struct dmae_command *dmae;
4701 u32 opcode;
4702 int loader_idx = PMF_DMAE_C(bp);
4703 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4705 bp->executer_idx = 0;
4707 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4708 DMAE_CMD_C_ENABLE |
4709 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4710 #ifdef __BIG_ENDIAN
4711 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4712 #else
4713 DMAE_CMD_ENDIANITY_DW_SWAP |
4714 #endif
4715 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4716 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
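/* The DMAE opcode assembled above encodes direction (PCI -> GRC here),
 * port/VN ownership and the endianity mode: the two ENDIANITY_*_SWAP
 * variants select how the DMAE engine swaps bytes/dwords so that the
 * host's view of the buffer stays correct on big-endian machines.
 */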
4718 if (bp->port.port_stx) {
4720 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4721 if (bp->func_stx)
4722 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4723 else
4724 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4725 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4726 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4727 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4728 dmae->dst_addr_hi = 0;
4729 dmae->len = sizeof(struct host_port_stats) >> 2;
4730 if (bp->func_stx) {
4731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4732 dmae->comp_addr_hi = 0;
4733 dmae->comp_val = 1;
4734 } else {
4735 dmae->comp_addr_lo =
4736 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4737 dmae->comp_addr_hi =
4738 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4739 dmae->comp_val = DMAE_COMP_VAL;
4741 *stats_comp = 0;
4742 }
4743 }
4745 if (bp->func_stx) {
4747 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4748 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4749 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4750 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4751 dmae->dst_addr_lo = bp->func_stx >> 2;
4752 dmae->dst_addr_hi = 0;
4753 dmae->len = sizeof(struct host_func_stats) >> 2;
4754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4756 dmae->comp_val = DMAE_COMP_VAL;
4758 *stats_comp = 0;
4759 }
4760 }
4762 static void bnx2x_stats_stop(struct bnx2x *bp)
4763 {
4764 int update = 0;
4766 bnx2x_stats_comp(bp);
4768 if (bp->port.pmf)
4769 update = (bnx2x_hw_stats_update(bp) == 0);
4771 update |= (bnx2x_storm_stats_update(bp) == 0);
4773 if (update) {
4774 bnx2x_net_stats_update(bp);
4776 if (bp->port.pmf)
4777 bnx2x_port_stats_stop(bp);
4779 bnx2x_hw_stats_post(bp);
4780 bnx2x_stats_comp(bp);
4781 }
4782 }
4784 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4785 {
4786 }
4788 static const struct {
4789 void (*action)(struct bnx2x *bp);
4790 enum bnx2x_stats_state next_state;
4791 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4792 /* state event */
4793 {
4794 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4795 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4796 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4797 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4798 },
4799 {
4800 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4801 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4802 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4803 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4804 }
4805 };
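/* Statistics state machine: bnx2x_stats_handle() below indexes this
 * table with the current state (DISABLED/ENABLED) and the event
 * (PMF/LINK_UP/UPDATE/STOP), runs the action and latches next_state.
 * The periodic path is simply:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * which in the ENABLED state runs bnx2x_stats_update() and stays ENABLED.
 */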
4807 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4808 {
4809 enum bnx2x_stats_state state = bp->stats_state;
4811 bnx2x_stats_stm[state][event].action(bp);
4812 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4814 /* Make sure the state has been "changed" */
4815 smp_wmb();
4817 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4818 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4819 state, event, bp->stats_state);
4820 }
4822 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4823 {
4824 struct dmae_command *dmae;
4825 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4827 /* sanity */
4828 if (!bp->port.pmf || !bp->port.port_stx) {
4829 BNX2X_ERR("BUG!\n");
4830 return;
4831 }
4833 bp->executer_idx = 0;
4835 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4836 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4837 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4838 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4839 #ifdef __BIG_ENDIAN
4840 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4841 #else
4842 DMAE_CMD_ENDIANITY_DW_SWAP |
4843 #endif
4844 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4845 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4846 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4847 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4848 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4849 dmae->dst_addr_hi = 0;
4850 dmae->len = sizeof(struct host_port_stats) >> 2;
4851 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4852 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4853 dmae->comp_val = DMAE_COMP_VAL;
4855 *stats_comp = 0;
4856 bnx2x_hw_stats_post(bp);
4857 bnx2x_stats_comp(bp);
4858 }
4860 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4861 {
4862 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4863 int port = BP_PORT(bp);
4864 int func;
4865 u32 func_stx;
4867 /* sanity */
4868 if (!bp->port.pmf || !bp->func_stx) {
4869 BNX2X_ERR("BUG!\n");
4870 return;
4871 }
4873 /* save our func_stx */
4874 func_stx = bp->func_stx;
4876 for (vn = VN_0; vn < vn_max; vn++) {
4877 func = 2*vn + port;
4879 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4880 bnx2x_func_stats_init(bp);
4881 bnx2x_hw_stats_post(bp);
4882 bnx2x_stats_comp(bp);
4883 }
4885 /* restore our func_stx */
4886 bp->func_stx = func_stx;
4887 }
4889 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4890 {
4891 struct dmae_command *dmae = &bp->stats_dmae;
4892 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4894 /* sanity */
4895 if (!bp->func_stx) {
4896 BNX2X_ERR("BUG!\n");
4897 return;
4898 }
4900 bp->executer_idx = 0;
4901 memset(dmae, 0, sizeof(struct dmae_command));
4903 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4904 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4905 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4906 #ifdef __BIG_ENDIAN
4907 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4908 #else
4909 DMAE_CMD_ENDIANITY_DW_SWAP |
4910 #endif
4911 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4912 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4913 dmae->src_addr_lo = bp->func_stx >> 2;
4914 dmae->src_addr_hi = 0;
4915 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4916 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4917 dmae->len = sizeof(struct host_func_stats) >> 2;
4918 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4919 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4920 dmae->comp_val = DMAE_COMP_VAL;
4922 *stats_comp = 0;
4923 bnx2x_hw_stats_post(bp);
4924 bnx2x_stats_comp(bp);
4925 }
4927 static void bnx2x_stats_init(struct bnx2x *bp)
4928 {
4929 int port = BP_PORT(bp);
4930 int func = BP_FUNC(bp);
4931 int i;
4933 bp->stats_pending = 0;
4934 bp->executer_idx = 0;
4935 bp->stats_counter = 0;
4937 /* port and func stats for management */
4938 if (!BP_NOMCP(bp)) {
4939 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4940 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4942 } else {
4943 bp->port.port_stx = 0;
4944 bp->func_stx = 0;
4945 }
4946 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4947 bp->port.port_stx, bp->func_stx);
4949 /* port stats */
4950 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4951 bp->port.old_nig_stats.brb_discard =
4952 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4953 bp->port.old_nig_stats.brb_truncate =
4954 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4955 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4956 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4957 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4958 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4960 /* function stats */
4961 for_each_queue(bp, i) {
4962 struct bnx2x_fastpath *fp = &bp->fp[i];
4964 memset(&fp->old_tclient, 0,
4965 sizeof(struct tstorm_per_client_stats));
4966 memset(&fp->old_uclient, 0,
4967 sizeof(struct ustorm_per_client_stats));
4968 memset(&fp->old_xclient, 0,
4969 sizeof(struct xstorm_per_client_stats));
4970 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4971 }
4973 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4974 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4976 bp->stats_state = STATS_STATE_DISABLED;
4978 if (bp->port.pmf) {
4979 if (bp->port.port_stx)
4980 bnx2x_port_stats_base_init(bp);
4982 if (bp->func_stx)
4983 bnx2x_func_stats_base_init(bp);
4985 } else if (bp->func_stx)
4986 bnx2x_func_stats_base_update(bp);
4987 }
4989 static void bnx2x_timer(unsigned long data)
4990 {
4991 struct bnx2x *bp = (struct bnx2x *) data;
4993 if (!netif_running(bp->dev))
4994 return;
4996 if (atomic_read(&bp->intr_sem) != 0)
4997 goto timer_restart;
4999 if (poll) {
5000 struct bnx2x_fastpath *fp = &bp->fp[0];
5001 int rc;
5003 bnx2x_tx_int(fp);
5004 rc = bnx2x_rx_int(fp, 1000);
5005 }
5007 if (!BP_NOMCP(bp)) {
5008 int func = BP_FUNC(bp);
5009 u32 drv_pulse;
5010 u32 mcp_pulse;
5012 ++bp->fw_drv_pulse_wr_seq;
5013 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5014 /* TBD - add SYSTEM_TIME */
5015 drv_pulse = bp->fw_drv_pulse_wr_seq;
5016 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5018 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5019 MCP_PULSE_SEQ_MASK);
5020 /* The delta between driver pulse and mcp response
5021 * should be 1 (before mcp response) or 0 (after mcp response)
5022 */
5023 if ((drv_pulse != mcp_pulse) &&
5024 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5025 /* someone lost a heartbeat... */
5026 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5027 drv_pulse, mcp_pulse);
5028 }
5029 }
5031 if (bp->state == BNX2X_STATE_OPEN)
5032 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5034 timer_restart:
5035 mod_timer(&bp->timer, jiffies + bp->current_interval);
5036 }
5038 /* end of Statistics */
5040 /* nic init */
5042 /*
5043 * nic init service functions
5044 */
5046 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5047 {
5048 int port = BP_PORT(bp);
5050 /* "CSTORM" */
5051 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5052 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5053 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5054 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5055 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5056 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5057 }
5059 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5060 dma_addr_t mapping, int sb_id)
5061 {
5062 int port = BP_PORT(bp);
5063 int func = BP_FUNC(bp);
5064 int index;
5065 u64 section;
5067 /* USTORM */
5068 section = ((u64)mapping) + offsetof(struct host_status_block,
5069 u_status_block);
5070 sb->u_status_block.status_block_id = sb_id;
5072 REG_WR(bp, BAR_CSTRORM_INTMEM +
5073 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5074 REG_WR(bp, BAR_CSTRORM_INTMEM +
5075 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5076 U64_HI(section));
5077 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5078 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5080 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5081 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5082 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5084 /* CSTORM */
5085 section = ((u64)mapping) + offsetof(struct host_status_block,
5086 c_status_block);
5087 sb->c_status_block.status_block_id = sb_id;
5089 REG_WR(bp, BAR_CSTRORM_INTMEM +
5090 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5091 REG_WR(bp, BAR_CSTRORM_INTMEM +
5092 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5093 U64_HI(section));
5094 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5095 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5097 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5098 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5099 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5101 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5102 }
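/* A fastpath status block has a USTORM half and a CSTORM half; the
 * function above publishes the host address of each half to the chip
 * and then writes 1 to every per-index HC_DISABLE word, so all indices
 * start disabled until bnx2x_update_coalesce() enables the ones in use.
 */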
5104 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5105 {
5106 int func = BP_FUNC(bp);
5108 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5109 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5110 sizeof(struct tstorm_def_status_block)/4);
5111 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5112 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5113 sizeof(struct cstorm_def_status_block_u)/4);
5114 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5115 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5116 sizeof(struct cstorm_def_status_block_c)/4);
5117 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5118 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5119 sizeof(struct xstorm_def_status_block)/4);
5120 }
5122 static void bnx2x_init_def_sb(struct bnx2x *bp,
5123 struct host_def_status_block *def_sb,
5124 dma_addr_t mapping, int sb_id)
5125 {
5126 int port = BP_PORT(bp);
5127 int func = BP_FUNC(bp);
5128 int index, val, reg_offset;
5129 u64 section;
5131 /* ATTN */
5132 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5133 atten_status_block);
5134 def_sb->atten_status_block.status_block_id = sb_id;
5136 bp->attn_state = 0;
5138 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5139 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5142 bp->attn_group[index].sig[0] = REG_RD(bp,
5143 reg_offset + 0x10*index);
5144 bp->attn_group[index].sig[1] = REG_RD(bp,
5145 reg_offset + 0x4 + 0x10*index);
5146 bp->attn_group[index].sig[2] = REG_RD(bp,
5147 reg_offset + 0x8 + 0x10*index);
5148 bp->attn_group[index].sig[3] = REG_RD(bp,
5149 reg_offset + 0xc + 0x10*index);
5150 }
5152 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5153 HC_REG_ATTN_MSG0_ADDR_L);
5155 REG_WR(bp, reg_offset, U64_LO(section));
5156 REG_WR(bp, reg_offset + 4, U64_HI(section));
5158 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5160 val = REG_RD(bp, reg_offset);
5161 val |= sb_id;
5162 REG_WR(bp, reg_offset, val);
5164 /* USTORM */
5165 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5166 u_def_status_block);
5167 def_sb->u_def_status_block.status_block_id = sb_id;
5169 REG_WR(bp, BAR_CSTRORM_INTMEM +
5170 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5171 REG_WR(bp, BAR_CSTRORM_INTMEM +
5172 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5173 U64_HI(section));
5174 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5175 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5177 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5178 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5179 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5181 /* CSTORM */
5182 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5183 c_def_status_block);
5184 def_sb->c_def_status_block.status_block_id = sb_id;
5186 REG_WR(bp, BAR_CSTRORM_INTMEM +
5187 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5188 REG_WR(bp, BAR_CSTRORM_INTMEM +
5189 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5190 U64_HI(section));
5191 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5192 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5194 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5195 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5196 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5198 /* TSTORM */
5199 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5200 t_def_status_block);
5201 def_sb->t_def_status_block.status_block_id = sb_id;
5203 REG_WR(bp, BAR_TSTRORM_INTMEM +
5204 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5205 REG_WR(bp, BAR_TSTRORM_INTMEM +
5206 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5207 U64_HI(section));
5208 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5209 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5211 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5212 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5213 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5215 /* XSTORM */
5216 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5217 x_def_status_block);
5218 def_sb->x_def_status_block.status_block_id = sb_id;
5220 REG_WR(bp, BAR_XSTRORM_INTMEM +
5221 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5222 REG_WR(bp, BAR_XSTRORM_INTMEM +
5223 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5224 U64_HI(section));
5225 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5226 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5228 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5229 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5230 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5232 bp->stats_pending = 0;
5233 bp->set_mac_pending = 0;
5235 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5236 }
5238 static void bnx2x_update_coalesce(struct bnx2x *bp)
5239 {
5240 int port = BP_PORT(bp);
5241 int i;
5243 for_each_queue(bp, i) {
5244 int sb_id = bp->fp[i].sb_id;
5246 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5247 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5248 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5249 U_SB_ETH_RX_CQ_INDEX),
5250 bp->rx_ticks/(4 * BNX2X_BTR));
5251 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5252 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5253 U_SB_ETH_RX_CQ_INDEX),
5254 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5256 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5257 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5258 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5259 C_SB_ETH_TX_CQ_INDEX),
5260 bp->tx_ticks/(4 * BNX2X_BTR));
5261 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5262 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5263 C_SB_ETH_TX_CQ_INDEX),
5264 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5265 }
5266 }
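/* The coalescing timeout written above is the tick value scaled down by
 * 4 * BNX2X_BTR; when rx_ticks/tx_ticks scale down to zero, the
 * REG_WR16 of 1 marks that index disabled in the HC instead.
 */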
5268 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5269 struct bnx2x_fastpath *fp, int last)
5270 {
5271 int i;
5273 for (i = 0; i < last; i++) {
5274 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5275 struct sk_buff *skb = rx_buf->skb;
5277 if (skb == NULL) {
5278 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5279 continue;
5280 }
5282 if (fp->tpa_state[i] == BNX2X_TPA_START)
5283 dma_unmap_single(&bp->pdev->dev,
5284 dma_unmap_addr(rx_buf, mapping),
5285 bp->rx_buf_size, DMA_FROM_DEVICE);
5287 dev_kfree_skb(skb);
5288 rx_buf->skb = NULL;
5289 }
5290 }
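/* TPA (LRO) pool bookkeeping: every aggregation bin owns a preallocated
 * skb. A bin left in BNX2X_TPA_START still has its buffer DMA-mapped
 * for the chip, which is why only that state is unmapped above before
 * the skb is freed.
 */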
5292 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5293 {
5294 int func = BP_FUNC(bp);
5295 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5296 ETH_MAX_AGGREGATION_QUEUES_E1H;
5297 u16 ring_prod, cqe_ring_prod;
5298 int i, j;
5300 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5301 DP(NETIF_MSG_IFUP,
5302 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5304 if (bp->flags & TPA_ENABLE_FLAG) {
5306 for_each_queue(bp, j) {
5307 struct bnx2x_fastpath *fp = &bp->fp[j];
5309 for (i = 0; i < max_agg_queues; i++) {
5310 fp->tpa_pool[i].skb =
5311 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5312 if (!fp->tpa_pool[i].skb) {
5313 BNX2X_ERR("Failed to allocate TPA "
5314 "skb pool for queue[%d] - "
5315 "disabling TPA on this "
5317 bnx2x_free_tpa_pool(bp, fp, i);
5318 fp->disable_tpa = 1;
5321 dma_unmap_addr_set((struct sw_rx_bd *)
5322 &bp->fp->tpa_pool[i],
5323 mapping, 0);
5324 fp->tpa_state[i] = BNX2X_TPA_STOP;
5325 }
5326 }
5327 }
5329 for_each_queue(bp, j) {
5330 struct bnx2x_fastpath *fp = &bp->fp[j];
5332 fp->rx_bd_cons = 0;
5333 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5334 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5336 /* "next page" elements initialization */
5338 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5339 struct eth_rx_sge *sge;
5341 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5342 sge->addr_hi =
5343 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5344 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5345 sge->addr_lo =
5346 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5347 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5348 }
5350 bnx2x_init_sge_ring_bit_mask(fp);
5352 /* RX BD ring */
5353 for (i = 1; i <= NUM_RX_RINGS; i++) {
5354 struct eth_rx_bd *rx_bd;
5356 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5357 rx_bd->addr_hi =
5358 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5359 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5360 rx_bd->addr_lo =
5361 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5362 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5363 }
5365 /* CQ ring */
5366 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5367 struct eth_rx_cqe_next_page *nextpg;
5369 nextpg = (struct eth_rx_cqe_next_page *)
5370 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5371 nextpg->addr_hi =
5372 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5373 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5374 nextpg->addr_lo =
5375 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5376 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5377 }
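/* All three RX rings are chained multi-page rings: the final slot(s) of
 * each page hold "next page" pointers rather than real descriptors,
 * which is why the loops above fill index CNT * i - 2 for the two-slot
 * BD/SGE next-pointers and CNT * i - 1 for the single next-page CQE.
 */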
5379 /* Allocate SGEs and initialize the ring elements */
5380 for (i = 0, ring_prod = 0;
5381 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5383 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5384 BNX2X_ERR("was only able to allocate "
5386 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5387 /* Cleanup already allocated elements */
5388 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5389 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5390 fp->disable_tpa = 1;
5391 ring_prod = 0;
5392 break;
5393 }
5394 ring_prod = NEXT_SGE_IDX(ring_prod);
5395 }
5396 fp->rx_sge_prod = ring_prod;
5398 /* Allocate BDs and initialize BD ring */
5399 fp->rx_comp_cons = 0;
5400 cqe_ring_prod = ring_prod = 0;
5401 for (i = 0; i < bp->rx_ring_size; i++) {
5402 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5403 BNX2X_ERR("was only able to allocate "
5404 "%d rx skbs on queue[%d]\n", i, j);
5405 fp->eth_q_stats.rx_skb_alloc_failed++;
5406 break;
5407 }
5408 ring_prod = NEXT_RX_IDX(ring_prod);
5409 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5410 WARN_ON(ring_prod <= i);
5411 }
5413 fp->rx_bd_prod = ring_prod;
5414 /* must not have more available CQEs than BDs */
5415 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5416 cqe_ring_prod);
5417 fp->rx_pkt = fp->rx_calls = 0;
5419 /* Warning!
5420 * this will generate an interrupt (to the TSTORM)
5421 * must only be done after chip is initialized
5422 */
5423 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5424 fp->rx_sge_prod);
5426 if (j != 0)
5427 continue;
5428 REG_WR(bp, BAR_USTRORM_INTMEM +
5429 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5430 U64_LO(fp->rx_comp_mapping));
5431 REG_WR(bp, BAR_USTRORM_INTMEM +
5432 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5433 U64_HI(fp->rx_comp_mapping));
5434 }
5435 }
5437 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5438 {
5439 int i, j;
5441 for_each_queue(bp, j) {
5442 struct bnx2x_fastpath *fp = &bp->fp[j];
5444 for (i = 1; i <= NUM_TX_RINGS; i++) {
5445 struct eth_tx_next_bd *tx_next_bd =
5446 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5448 tx_next_bd->addr_hi =
5449 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5450 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5451 tx_next_bd->addr_lo =
5452 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5453 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5454 }
5456 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5457 fp->tx_db.data.zero_fill1 = 0;
5458 fp->tx_db.data.prod = 0;
5460 fp->tx_pkt_prod = 0;
5461 fp->tx_pkt_cons = 0;
5462 fp->tx_bd_prod = 0;
5463 fp->tx_bd_cons = 0;
5464 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5465 fp->tx_pkt = 0;
5466 }
5467 }
5469 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5470 {
5471 int func = BP_FUNC(bp);
5473 spin_lock_init(&bp->spq_lock);
5475 bp->spq_left = MAX_SPQ_PENDING;
5476 bp->spq_prod_idx = 0;
5477 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5478 bp->spq_prod_bd = bp->spq;
5479 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5481 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5482 U64_LO(bp->spq_mapping));
5483 REG_WR(bp,
5484 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5485 U64_HI(bp->spq_mapping));
5487 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5488 bp->spq_prod_idx);
5489 }
5491 static void bnx2x_init_context(struct bnx2x *bp)
5492 {
5493 int i;
5496 for_each_queue(bp, i) {
5497 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5498 struct bnx2x_fastpath *fp = &bp->fp[i];
5499 u8 cl_id = fp->cl_id;
5501 context->ustorm_st_context.common.sb_index_numbers =
5502 BNX2X_RX_SB_INDEX_NUM;
5503 context->ustorm_st_context.common.clientId = cl_id;
5504 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5505 context->ustorm_st_context.common.flags =
5506 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5507 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5508 context->ustorm_st_context.common.statistics_counter_id =
5509 cl_id;
5510 context->ustorm_st_context.common.mc_alignment_log_size =
5511 BNX2X_RX_ALIGN_SHIFT;
5512 context->ustorm_st_context.common.bd_buff_size =
5513 bp->rx_buf_size;
5514 context->ustorm_st_context.common.bd_page_base_hi =
5515 U64_HI(fp->rx_desc_mapping);
5516 context->ustorm_st_context.common.bd_page_base_lo =
5517 U64_LO(fp->rx_desc_mapping);
5518 if (!fp->disable_tpa) {
5519 context->ustorm_st_context.common.flags |=
5520 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5521 context->ustorm_st_context.common.sge_buff_size =
5522 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5523 (u32)0xffff);
5524 context->ustorm_st_context.common.sge_page_base_hi =
5525 U64_HI(fp->rx_sge_mapping);
5526 context->ustorm_st_context.common.sge_page_base_lo =
5527 U64_LO(fp->rx_sge_mapping);
5529 context->ustorm_st_context.common.max_sges_for_packet =
5530 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5531 context->ustorm_st_context.common.max_sges_for_packet =
5532 ((context->ustorm_st_context.common.
5533 max_sges_for_packet + PAGES_PER_SGE - 1) &
5534 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5535 }
5537 context->ustorm_ag_context.cdu_usage =
5538 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5539 CDU_REGION_NUMBER_UCM_AG,
5540 ETH_CONNECTION_TYPE);
5542 context->xstorm_ag_context.cdu_reserved =
5543 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5544 CDU_REGION_NUMBER_XCM_AG,
5545 ETH_CONNECTION_TYPE);
5546 }
5549 for_each_queue(bp, i) {
5550 struct bnx2x_fastpath *fp = &bp->fp[i];
5551 struct eth_context *context =
5552 bnx2x_sp(bp, context[i].eth);
5554 context->cstorm_st_context.sb_index_number =
5555 C_SB_ETH_TX_CQ_INDEX;
5556 context->cstorm_st_context.status_block_id = fp->sb_id;
5558 context->xstorm_st_context.tx_bd_page_base_hi =
5559 U64_HI(fp->tx_desc_mapping);
5560 context->xstorm_st_context.tx_bd_page_base_lo =
5561 U64_LO(fp->tx_desc_mapping);
5562 context->xstorm_st_context.statistics_data = (fp->cl_id |
5563 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5564 }
5565 }
5567 static void bnx2x_init_ind_table(struct bnx2x *bp)
5568 {
5569 int func = BP_FUNC(bp);
5570 int i;
5572 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5573 return;
5575 DP(NETIF_MSG_IFUP,
5576 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5577 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5578 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5579 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5580 bp->fp->cl_id + (i % bp->num_queues));
5581 }
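/* RSS indirection: each of the TSTORM_INDIRECTION_TABLE_SIZE entries is
 * filled round-robin with the client IDs of the RX queues, so the RSS
 * hash result spreads incoming flows evenly across the queues.
 */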
5583 static void bnx2x_set_client_config(struct bnx2x *bp)
5584 {
5585 struct tstorm_eth_client_config tstorm_client = {0};
5586 int port = BP_PORT(bp);
5587 int i;
5589 tstorm_client.mtu = bp->dev->mtu;
5590 tstorm_client.config_flags =
5591 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5592 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5593 #ifdef BCM_VLAN
5594 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5595 tstorm_client.config_flags |=
5596 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5597 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5601 for_each_queue(bp, i) {
5602 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5604 REG_WR(bp, BAR_TSTRORM_INTMEM +
5605 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5606 ((u32 *)&tstorm_client)[0]);
5607 REG_WR(bp, BAR_TSTRORM_INTMEM +
5608 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5609 ((u32 *)&tstorm_client)[1]);
5610 }
5612 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5613 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5614 }
5616 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5617 {
5618 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5619 int mode = bp->rx_mode;
5620 int mask = bp->rx_mode_cl_mask;
5621 int func = BP_FUNC(bp);
5622 int port = BP_PORT(bp);
5623 int i;
5624 /* All but management unicast packets should pass to the host as well */
5625 u32 llh_mask =
5626 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5627 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5628 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5629 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5631 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5634 case BNX2X_RX_MODE_NONE: /* no Rx */
5635 tstorm_mac_filter.ucast_drop_all = mask;
5636 tstorm_mac_filter.mcast_drop_all = mask;
5637 tstorm_mac_filter.bcast_drop_all = mask;
5638 break;
5640 case BNX2X_RX_MODE_NORMAL:
5641 tstorm_mac_filter.bcast_accept_all = mask;
5642 break;
5644 case BNX2X_RX_MODE_ALLMULTI:
5645 tstorm_mac_filter.mcast_accept_all = mask;
5646 tstorm_mac_filter.bcast_accept_all = mask;
5647 break;
5649 case BNX2X_RX_MODE_PROMISC:
5650 tstorm_mac_filter.ucast_accept_all = mask;
5651 tstorm_mac_filter.mcast_accept_all = mask;
5652 tstorm_mac_filter.bcast_accept_all = mask;
5653 /* pass management unicast packets as well */
5654 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5655 break;
5657 default:
5658 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5663 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
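/* Two filtering layers are programmed here: the NIG LLH mask written
 * above only decides which management-directed frames are also passed
 * up toward the host, while the TSTORM mac_filter_config written below
 * applies the actual per-client accept/drop policy.
 */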
5666 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5667 REG_WR(bp, BAR_TSTRORM_INTMEM +
5668 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5669 ((u32 *)&tstorm_mac_filter)[i]);
5671 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5672 ((u32 *)&tstorm_mac_filter)[i]); */
5673 }
5675 if (mode != BNX2X_RX_MODE_NONE)
5676 bnx2x_set_client_config(bp);
5677 }
5679 static void bnx2x_init_internal_common(struct bnx2x *bp)
5680 {
5681 int i;
5683 /* Zero this manually as its initialization is
5684 currently missing in the initTool */
5685 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5686 REG_WR(bp, BAR_USTRORM_INTMEM +
5687 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5688 }
5690 static void bnx2x_init_internal_port(struct bnx2x *bp)
5691 {
5692 int port = BP_PORT(bp);
5694 REG_WR(bp,
5695 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5696 REG_WR(bp,
5697 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5698 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5699 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5700 }
5702 static void bnx2x_init_internal_func(struct bnx2x *bp)
5703 {
5704 struct tstorm_eth_function_common_config tstorm_config = {0};
5705 struct stats_indication_flags stats_flags = {0};
5706 int port = BP_PORT(bp);
5707 int func = BP_FUNC(bp);
5708 int i, j;
5709 u32 offset;
5710 u16 max_agg_size;
5712 if (is_multi(bp)) {
5713 tstorm_config.config_flags = MULTI_FLAGS(bp);
5714 tstorm_config.rss_result_mask = MULTI_MASK;
5715 }
5717 /* Enable TPA if needed */
5718 if (bp->flags & TPA_ENABLE_FLAG)
5719 tstorm_config.config_flags |=
5720 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5722 if (IS_E1HMF(bp))
5723 tstorm_config.config_flags |=
5724 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5726 tstorm_config.leading_client_id = BP_L_ID(bp);
5728 REG_WR(bp, BAR_TSTRORM_INTMEM +
5729 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5730 (*(u32 *)&tstorm_config));
5732 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5733 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5734 bnx2x_set_storm_rx_mode(bp);
5736 for_each_queue(bp, i) {
5737 u8 cl_id = bp->fp[i].cl_id;
5739 /* reset xstorm per client statistics */
5740 offset = BAR_XSTRORM_INTMEM +
5741 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5742 for (j = 0;
5743 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5744 REG_WR(bp, offset + j*4, 0);
5746 /* reset tstorm per client statistics */
5747 offset = BAR_TSTRORM_INTMEM +
5748 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5749 for (j = 0;
5750 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5751 REG_WR(bp, offset + j*4, 0);
5753 /* reset ustorm per client statistics */
5754 offset = BAR_USTRORM_INTMEM +
5755 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5756 for (j = 0;
5757 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5758 REG_WR(bp, offset + j*4, 0);
5759 }
5761 /* Init statistics related context */
5762 stats_flags.collect_eth = 1;
5764 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5765 ((u32 *)&stats_flags)[0]);
5766 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5767 ((u32 *)&stats_flags)[1]);
5769 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5770 ((u32 *)&stats_flags)[0]);
5771 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5772 ((u32 *)&stats_flags)[1]);
5774 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5775 ((u32 *)&stats_flags)[0]);
5776 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5777 ((u32 *)&stats_flags)[1]);
5779 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5780 ((u32 *)&stats_flags)[0]);
5781 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5782 ((u32 *)&stats_flags)[1]);
5784 REG_WR(bp, BAR_XSTRORM_INTMEM +
5785 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5786 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5787 REG_WR(bp, BAR_XSTRORM_INTMEM +
5788 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5789 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5791 REG_WR(bp, BAR_TSTRORM_INTMEM +
5792 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5793 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5794 REG_WR(bp, BAR_TSTRORM_INTMEM +
5795 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5796 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5798 REG_WR(bp, BAR_USTRORM_INTMEM +
5799 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5800 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5801 REG_WR(bp, BAR_USTRORM_INTMEM +
5802 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5803 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5805 if (CHIP_IS_E1H(bp)) {
5806 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5807 IS_E1HMF(bp));
5808 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5809 IS_E1HMF(bp));
5810 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5811 IS_E1HMF(bp));
5812 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5813 IS_E1HMF(bp));
5815 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5816 bp->e1hov);
5817 }
5819 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5820 max_agg_size =
5821 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5822 SGE_PAGE_SIZE * PAGES_PER_SGE),
5823 (u32)0xffff);
5824 for_each_queue(bp, i) {
5825 struct bnx2x_fastpath *fp = &bp->fp[i];
5827 REG_WR(bp, BAR_USTRORM_INTMEM +
5828 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5829 U64_LO(fp->rx_comp_mapping));
5830 REG_WR(bp, BAR_USTRORM_INTMEM +
5831 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5832 U64_HI(fp->rx_comp_mapping));
5834 /* Next page */
5835 REG_WR(bp, BAR_USTRORM_INTMEM +
5836 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5837 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5838 REG_WR(bp, BAR_USTRORM_INTMEM +
5839 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5840 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5842 REG_WR16(bp, BAR_USTRORM_INTMEM +
5843 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5844 max_agg_size);
5845 }
5847 /* dropless flow control */
5848 if (CHIP_IS_E1H(bp)) {
5849 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5851 rx_pause.bd_thr_low = 250;
5852 rx_pause.cqe_thr_low = 250;
5853 rx_pause.cos = 1;
5854 rx_pause.sge_thr_low = 0;
5855 rx_pause.bd_thr_high = 350;
5856 rx_pause.cqe_thr_high = 350;
5857 rx_pause.sge_thr_high = 0;
5859 for_each_queue(bp, i) {
5860 struct bnx2x_fastpath *fp = &bp->fp[i];
5862 if (!fp->disable_tpa) {
5863 rx_pause.sge_thr_low = 150;
5864 rx_pause.sge_thr_high = 250;
5865 }
5868 offset = BAR_USTRORM_INTMEM +
5869 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5870 fp->cl_id);
5871 for (j = 0;
5872 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5873 j++)
5874 REG_WR(bp, offset + j*4,
5875 ((u32 *)&rx_pause)[j]);
5876 }
5877 }
5879 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5881 /* Init rate shaping and fairness contexts */
5882 if (IS_E1HMF(bp)) {
5883 int vn;
5885 /* During init there is no active link
5886 Until link is up, set link rate to 10Gbps */
5887 bp->link_vars.line_speed = SPEED_10000;
5888 bnx2x_init_port_minmax(bp);
5890 if (!BP_NOMCP(bp))
5891 bp->mf_config =
5892 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5893 bnx2x_calc_vn_weight_sum(bp);
5895 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5896 bnx2x_init_vn_minmax(bp, 2*vn + port);
5898 /* Enable rate shaping and fairness */
5899 bp->cmng.flags.cmng_enables |=
5900 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5902 } else {
5903 /* rate shaping and fairness are disabled */
5904 DP(NETIF_MSG_IFUP,
5905 "single function mode minmax will be disabled\n");
5906 }
5909 /* Store it to internal memory */
5910 if (bp->port.pmf)
5911 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5912 REG_WR(bp, BAR_XSTRORM_INTMEM +
5913 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5914 ((u32 *)(&bp->cmng))[i]);
5915 }
5917 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5918 {
5919 switch (load_code) {
5920 case FW_MSG_CODE_DRV_LOAD_COMMON:
5921 bnx2x_init_internal_common(bp);
5922 /* no break */
5924 case FW_MSG_CODE_DRV_LOAD_PORT:
5925 bnx2x_init_internal_port(bp);
5926 /* no break */
5928 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5929 bnx2x_init_internal_func(bp);
5930 break;
5932 default:
5933 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5938 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5942 for_each_queue(bp, i) {
5943 struct bnx2x_fastpath *fp = &bp->fp[i];
5945 fp->bp = bp;
5946 fp->state = BNX2X_FP_STATE_CLOSED;
5947 fp->index = i;
5948 fp->cl_id = BP_L_ID(bp) + i;
5949 #ifdef BCM_CNIC
5950 fp->sb_id = fp->cl_id + 1;
5951 #else
5952 fp->sb_id = fp->cl_id;
5953 #endif
5954 DP(NETIF_MSG_IFUP,
5955 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5956 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5957 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5958 fp->sb_id);
5959 bnx2x_update_fpsb_idx(fp);
5960 }
5962 /* ensure status block indices were read */
5963 rmb();
5966 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5967 DEF_SB_ID);
5968 bnx2x_update_dsb_idx(bp);
5969 bnx2x_update_coalesce(bp);
5970 bnx2x_init_rx_rings(bp);
5971 bnx2x_init_tx_ring(bp);
5972 bnx2x_init_sp_ring(bp);
5973 bnx2x_init_context(bp);
5974 bnx2x_init_internal(bp, load_code);
5975 bnx2x_init_ind_table(bp);
5976 bnx2x_stats_init(bp);
5978 /* At this point, we are ready for interrupts */
5979 atomic_set(&bp->intr_sem, 0);
5981 /* flush all before enabling interrupts */
5982 mb();
5983 mmiowb();
5985 bnx2x_int_enable(bp);
5987 /* Check for SPIO5 */
5988 bnx2x_attn_int_deasserted0(bp,
5989 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5990 AEU_INPUTS_ATTN_BITS_SPIO5);
5991 }
5993 /* end of nic init */
5995 /*
5996 * gzip service functions
5997 */
5999 static int bnx2x_gunzip_init(struct bnx2x *bp)
6000 {
6001 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6002 &bp->gunzip_mapping, GFP_KERNEL);
6003 if (bp->gunzip_buf == NULL)
6004 goto gunzip_nomem1;
6006 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6007 if (bp->strm == NULL)
6008 goto gunzip_nomem2;
6010 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6011 GFP_KERNEL);
6012 if (bp->strm->workspace == NULL)
6013 goto gunzip_nomem3;
6015 return 0;
6017 gunzip_nomem3:
6018 kfree(bp->strm);
6019 bp->strm = NULL;
6021 gunzip_nomem2:
6022 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6023 bp->gunzip_mapping);
6024 bp->gunzip_buf = NULL;
6027 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
6031 static void bnx2x_gunzip_end(struct bnx2x *bp)
6033 kfree(bp->strm->workspace);
6035 kfree(bp->strm);
6036 bp->strm = NULL;
6038 if (bp->gunzip_buf) {
6039 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6040 bp->gunzip_mapping);
6041 bp->gunzip_buf = NULL;
6042 }
6043 }
6045 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6046 {
6047 int n, rc;
6049 /* check gzip header */
6050 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6051 BNX2X_ERR("Bad gzip header\n");
6059 if (zbuf[3] & FNAME)
6060 while ((zbuf[n++] != 0) && (n < len));
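/* Per RFC 1952, a gzip member begins with a 10-byte fixed header; when
 * the FNAME flag (bit 3 of the FLG byte, zbuf[3]) is set, a
 * NUL-terminated original file name follows. The loop above skips it
 * so that only the raw deflate stream is handed to zlib_inflate()
 * (hence the -MAX_WBITS window parameter below).
 */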
6062 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6063 bp->strm->avail_in = len - n;
6064 bp->strm->next_out = bp->gunzip_buf;
6065 bp->strm->avail_out = FW_BUF_SIZE;
6067 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6068 if (rc != Z_OK)
6069 return rc;
6071 rc = zlib_inflate(bp->strm, Z_FINISH);
6072 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6073 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6076 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6077 if (bp->gunzip_outlen & 0x3)
6078 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6080 bp->gunzip_outlen >>= 2;
6082 zlib_inflateEnd(bp->strm);
6084 if (rc == Z_STREAM_END)
6085 return 0;
6087 return rc;
6088 }
6090 /* nic load/unload */
6092 /*
6093 * General service functions
6094 */
6096 /* send a NIG loopback debug packet */
6097 static void bnx2x_lb_pckt(struct bnx2x *bp)
6098 {
6099 u32 wb_write[3];
6101 /* Ethernet source and destination addresses */
6102 wb_write[0] = 0x55555555;
6103 wb_write[1] = 0x55555555;
6104 wb_write[2] = 0x20; /* SOP */
6105 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6107 /* NON-IP protocol */
6108 wb_write[0] = 0x09000000;
6109 wb_write[1] = 0x55555555;
6110 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6111 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6112 }
6114 /* some of the internal memories
6115 * are not directly readable from the driver
6116 * to test them we send debug packets
6117 */
6118 static int bnx2x_int_mem_test(struct bnx2x *bp)
6119 {
6120 int factor;
6121 int count, i;
6122 u32 val = 0;
6124 if (CHIP_REV_IS_FPGA(bp))
6125 factor = 120;
6126 else if (CHIP_REV_IS_EMUL(bp))
6127 factor = 200;
6128 else
6129 factor = 1;
6131 DP(NETIF_MSG_HW, "start part1\n");
6133 /* Disable inputs of parser neighbor blocks */
6134 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6135 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6136 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6137 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6139 /* Write 0 to parser credits for CFC search request */
6140 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6142 /* send Ethernet packet */
6143 bnx2x_lb_pckt(bp);
6145 /* TODO do i reset NIG statistic? */
6146 /* Wait until NIG register shows 1 packet of size 0x10 */
6147 count = 1000 * factor;
6148 while (count) {
6150 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6151 val = *bnx2x_sp(bp, wb_data[0]);
6152 if (val == 0x10)
6153 break;
6155 msleep(10);
6156 count--;
6157 }
6158 if (val != 0x10) {
6159 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6160 return -1;
6161 }
6163 /* Wait until PRS register shows 1 packet */
6164 count = 1000 * factor;
6165 while (count) {
6166 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6167 if (val == 1)
6168 break;
6170 msleep(10);
6171 count--;
6172 }
6173 if (val != 0x1) {
6174 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6175 return -2;
6176 }
6178 /* Reset and init BRB, PRS */
6179 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6180 msleep(50);
6181 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6182 msleep(50);
6183 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6184 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6186 DP(NETIF_MSG_HW, "part2\n");
6188 /* Disable inputs of parser neighbor blocks */
6189 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6190 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6191 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6192 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6194 /* Write 0 to parser credits for CFC search request */
6195 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6197 /* send 10 Ethernet packets */
6198 for (i = 0; i < 10; i++)
6199 bnx2x_lb_pckt(bp);
6201 /* Wait until NIG register shows 10 + 1
6202 packets of size 11*0x10 = 0xb0 */
6203 count = 1000 * factor;
6204 while (count) {
6206 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6207 val = *bnx2x_sp(bp, wb_data[0]);
6208 if (val == 0xb0)
6209 break;
6211 msleep(10);
6212 count--;
6213 }
6214 if (val != 0xb0) {
6215 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6216 return -3;
6217 }
6219 /* Wait until PRS register shows 2 packets */
6220 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6221 if (val != 2)
6222 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6224 /* Write 1 to parser credits for CFC search request */
6225 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6227 /* Wait until PRS register shows 3 packets */
6228 msleep(10 * factor);
6229 /* Wait until NIG register shows 1 packet of size 0x10 */
6230 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6231 if (val != 3)
6232 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6234 /* clear NIG EOP FIFO */
6235 for (i = 0; i < 11; i++)
6236 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6237 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6238 if (val != 1) {
6239 BNX2X_ERR("clear of NIG failed\n");
6240 return -4;
6241 }
6243 /* Reset and init BRB, PRS, NIG */
6244 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6245 msleep(50);
6246 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6247 msleep(50);
6248 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6249 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6250 #ifndef BCM_CNIC
6251 /* set NIC mode */
6252 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6253 #endif
6255 /* Enable inputs of parser neighbor blocks */
6256 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6257 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6258 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6259 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6261 DP(NETIF_MSG_HW, "done\n");
6266 static void enable_blocks_attention(struct bnx2x *bp)
6268 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6269 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6270 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6271 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6272 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6273 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6274 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6275 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6276 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6277 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6278 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6279 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6280 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6281 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6282 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6283 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6284 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6285 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6286 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6287 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6288 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6289 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6290 if (CHIP_REV_IS_FPGA(bp))
6291 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6292 else
6293 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6294 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6295 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6296 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6297 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6298 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6299 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6300 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6301 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6302 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6303 }
6305 static const struct {
6306 u32 addr;
6307 u32 mask;
6308 } bnx2x_parity_mask[] = {
6309 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6310 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6311 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6312 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6313 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6314 {QM_REG_QM_PRTY_MASK, 0x0},
6315 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6316 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6317 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6318 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6319 {CDU_REG_CDU_PRTY_MASK, 0x0},
6320 {CFC_REG_CFC_PRTY_MASK, 0x0},
6321 {DBG_REG_DBG_PRTY_MASK, 0x0},
6322 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6323 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6324 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6325 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6326 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6327 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6328 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6329 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6330 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6331 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6332 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6333 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6334 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6335 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6336 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6337 };
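/* Parity mask semantics: bits set in a mask stay masked (ignored) when
 * the value is written to the block's PRTY_MASK register, while an
 * entry of 0 enables parity attention for every bit the block
 * implements. The non-zero entries correspond to the bits called out
 * in the inline comments.
 */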
6339 static void enable_blocks_parity(struct bnx2x *bp)
6340 {
6341 int i, mask_arr_len =
6342 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6344 for (i = 0; i < mask_arr_len; i++)
6345 REG_WR(bp, bnx2x_parity_mask[i].addr,
6346 bnx2x_parity_mask[i].mask);
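
/*
 * To illustrate the mask convention used above (an inference from the
 * table comments, not from hardware documentation): writing 0x0 leaves
 * every parity source of a block enabled, 0xffffffff masks them all, and
 * a partial value such as TSDM's 0x18 masks only bits 3 and 4.
 */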
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	int port;
	u32 val;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
		MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
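
	/*
	 * Worked example of the threshold math above (illustration only):
	 * for mtu == 9000, low = 96 + 9000/64 + 1 = 237 BRB blocks of
	 * 256 bytes each, i.e. roughly (24*1024 + 4*9000)/256, and
	 * high = 237 + 56 = 293.
	 */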
	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF,
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
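
	/*
	 * Illustration of the two values above (derived from the comment):
	 * 0x7 leaves only bits 0-2 unmasked (SF mode), while 0xF7 also
	 * unmasks bits 4-7 for the per-vn group attentions and keeps
	 * bit 3 masked (MF mode).
	 */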
	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a valid bit (1)
   added at the 53rd bit;
   then, since this is a wide register(TM),
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
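
/*
 * Worked example for the macros above (hypothetical address, for
 * illustration only): with x = 0x123456789000,
 *	ONCHIP_ADDR1(x) = (x >> 12) & 0xFFFFFFFF = 0x23456789
 *	ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44)  = 0x00100001
 * so the valid bit lands at bit 52 of the combined wide value
 * (bit 20 of the upper 32-bit write).
 */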
#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif
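
	/*
	 * Illustration of the searcher geometry configured above: T2 is
	 * 16KB of 64-byte entries, so COUNTFREE0 is told 16*1024/64 = 256
	 * entries are free, FIRSTFREE0 points at t2_mapping and LASTFREE0
	 * at the final entry, t2_mapping + 16K - 64.
	 */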
	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;
	int i;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
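
/*
 * Note on the two helpers above: BNX2X_PCI_ALLOC returns zeroed
 * DMA-coherent memory that the chip accesses directly, while BNX2X_ALLOC
 * uses vmalloc() for host-only bookkeeping rings (the sw_rx_bd/sw_tx_bd
 * arrays below) that the hardware never touches.
 */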
	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
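
	/*
	 * Illustration of the loop above: each 64-byte T2 entry carries, in
	 * its last 8 bytes (offset 56), the physical address of the next
	 * entry - entry 0 at t2_mapping points to t2_mapping + 64, entry 1
	 * to t2_mapping + 128, and so on, forming the free list the
	 * searcher hardware walks.
	 */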
	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
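	/* e.g. (following the layout above) port 1 keeps its primary ETH
	 * MAC at unicast offset 32, and the iSCSI MAC lands at offset 34 -
	 * see the (BP_PORT(bp) ? 32 : 0) + 2 computation in
	 * bnx2x_set_iscsi_eth_mac_addr() below.
	 */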
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;
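
	/*
	 * Illustration (assuming a little-endian host): for MAC
	 * 00:11:22:33:44:55, *(u16 *)&mac[0] reads 0x1100, so swab16()
	 * yields msb_mac_addr = 0x0011; likewise middle_mac_addr = 0x2233
	 * and lsb_mac_addr = 0x4455.
	 */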
	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
						cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
						cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
						cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * arrives.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}
#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
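
	/*
	 * Illustration of the NO-MCP accounting above: on a two-port board,
	 * the first function to load sees {1, 1, 0} and gets LOAD_COMMON,
	 * a later function on the other port sees {2, 1, 1} and gets
	 * LOAD_PORT, and any further function on an already-loaded port
	 * gets LOAD_FUNCTION.
	 */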
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
#endif
				break;
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
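
		/*
		 * Illustration: for VN 0, entry = 8, so the MAC lands in
		 * match entry 1 (registers EMAC_MAC_MATCH + 8 and + 12);
		 * e.g. MAC 00:11:22:33:44:55 is written as 0x0011 and
		 * 0x22334455.
		 */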
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
}
static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	/* The last driver must disable "close the gate" functionality if
	 * there is no parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mailbox sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 addr, val;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
}
#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp driver handle
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	/* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
	   SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp driver handle
 * @param magic_val Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */
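
/*
 * i.e. bnx2x_reset_mcp_comp() below polls at most MCP_TIMEOUT /
 * MCP_ONE_TIMEOUT = 50 times, each poll waiting 100ms - or 1s on
 * emulation/FPGA, where bnx2x_mcp_wait_one() stretches the wait 10x.
 */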
/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp driver handle
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check the validity map of the last port.
		 * Currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
8618 static void bnx2x_pxp_prep(struct bnx2x *bp)
8620 if (!CHIP_IS_E1(bp)) {
8621 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8622 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8623 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8629 * Reset the whole chip except for:
8631 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8634 * - MISC (including AEU)
8638 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8640 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8643 MISC_REGISTERS_RESET_REG_1_RST_HC |
8644 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8645 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8648 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8649 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8650 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8651 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8652 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8653 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8654 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8655 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8657 reset_mask1 = 0xffffffff;
8660 reset_mask2 = 0xffff;
8662 reset_mask2 = 0x1ffff;
8664 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8665 reset_mask1 & (~not_reset_mask1));
8666 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8667 reset_mask2 & (~not_reset_mask2));
8672 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
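/* Worked example of the mask arithmetic above (illustrative, hypothetical
 * helper): blocks listed in the not_reset mask are simply removed from the
 * value written to the ..._CLEAR register, so they stay out of reset while
 * everything else is reset and then released via the ..._SET register.
 */
static inline u32 example_reset_clear_mask(u32 reset_mask, u32 not_reset_mask)
{
	/* e.g. 0xffffffff & ~0x00000005 == 0xfffffffa */
	return reset_mask & (~not_reset_mask);
}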
8677 static int bnx2x_process_kill(struct bnx2x *bp)
8681 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8684 /* Empty the Tetris buffer, wait for 1s */
8686 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8687 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8688 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8689 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8690 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8691 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8692 ((port_is_idle_0 & 0x1) == 0x1) &&
8693 ((port_is_idle_1 & 0x1) == 0x1) &&
8694 (pgl_exp_rom2 == 0xffffffff))
8697 } while (cnt-- > 0);
8700 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there are still"
8702 " outstanding read requests after 1s!\n");
8703 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8704 " port_is_idle_0=0x%08x,"
8705 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8706 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8713 /* Close gates #2, #3 and #4 */
8714 bnx2x_set_234_gates(bp, true);
8716 /* TBD: Indicate that "process kill" is in progress to MCP */
8718 /* Clear "unprepared" bit */
8719 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8722 /* Make sure all is written to the chip before the reset */
8725 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8726 * PSWHST, GRC and PSWRD Tetris buffer.
8730 /* Prepare for chip reset: */
8732 bnx2x_reset_mcp_prep(bp, &val);
8738 /* reset the chip */
8739 bnx2x_process_kill_chip_reset(bp);
8742 /* Recover after reset: */
8744 if (bnx2x_reset_mcp_comp(bp, val))
8750 /* Open the gates #2, #3 and #4 */
8751 bnx2x_set_234_gates(bp, false);
8753 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8754  * reset state, re-enable attentions. */
8759 static int bnx2x_leader_reset(struct bnx2x *bp)
8762 /* Try to recover after the failure */
8763 if (bnx2x_process_kill(bp)) {
8764 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
8767 goto exit_leader_reset;
8770 /* Clear "reset is in progress" bit and update the driver state */
8771 bnx2x_set_reset_done(bp);
8772 bp->recovery_state = BNX2X_RECOVERY_DONE;
8776 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8781 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8783 /* Assumption: runs under rtnl lock. This together with the fact
8784 * that it's called only from bnx2x_reset_task() ensures that it
8785 * will never be called when netif_running(bp->dev) is false.
8787 static void bnx2x_parity_recover(struct bnx2x *bp)
8789 DP(NETIF_MSG_HW, "Handling parity\n");
8791 switch (bp->recovery_state) {
8792 case BNX2X_RECOVERY_INIT:
8793 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8794 /* Try to get a LEADER_LOCK HW lock */
8795 if (bnx2x_trylock_hw_lock(bp,
8796 HW_LOCK_RESOURCE_RESERVED_08))
8799 /* Stop the driver */
8800 /* If interface has been removed - break */
8801 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8804 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8805 /* Ensure "is_leader" and "recovery_state"
8806 * update values are seen on other CPUs
8811 case BNX2X_RECOVERY_WAIT:
8812 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8813 if (bp->is_leader) {
8814 u32 load_counter = bnx2x_get_load_cnt(bp);
8816 /* Wait until all other functions get down. */
8819 schedule_delayed_work(&bp->reset_task,
8823 /* If all other functions got down -
8824 * try to bring the chip back to
8825 * normal. In any case it's an exit
8826 * point for a leader.
8828 if (bnx2x_leader_reset(bp) ||
8829 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8830 printk(KERN_ERR"%s: Recovery "
8831 "has failed. Power cycle is "
8832 "needed.\n", bp->dev->name);
8833 /* Disconnect this device */
8834 netif_device_detach(bp->dev);
8835 /* Block ifup for all function
8836 * of this ASIC until
8837 * "process kill" or power
8840 bnx2x_set_reset_in_progress(bp);
8841 /* Shut down the power */
8842 bnx2x_set_power_state(bp,
8849 } else { /* non-leader */
8850 if (!bnx2x_reset_is_done(bp)) {
8851 /* Try to get a LEADER_LOCK HW lock as
8852 * long as a former leader may have
8853 * been unloaded by the user or has
8854 * released leadership for another reason. */
8857 if (bnx2x_trylock_hw_lock(bp,
8858 HW_LOCK_RESOURCE_RESERVED_08)) {
8859 /* I'm a leader now! Restart the recovery flow. */
8866 schedule_delayed_work(&bp->reset_task,
8870 } else { /* A leader has completed
8871 * the "process kill". It's an exit
8872 * point for a non-leader.
8874 bnx2x_nic_load(bp, LOAD_NORMAL);
8875 bp->recovery_state =
8876 BNX2X_RECOVERY_DONE;
8887 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8888 * scheduled on a general queue in order to prevent a deadlock.
8890 static void bnx2x_reset_task(struct work_struct *work)
8892 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8894 #ifdef BNX2X_STOP_ON_ERROR
8895 BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
8896 " so the reset is not done to allow a debug dump,\n"
8897 KERN_ERR " you will need to reboot when done\n");
8903 if (!netif_running(bp->dev))
8904 goto reset_task_exit;
8906 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8907 bnx2x_parity_recover(bp);
8909 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8910 bnx2x_nic_load(bp, LOAD_NORMAL);
8917 /* end of nic load/unload */
8922 * Init service functions
8925 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8928 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8929 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8930 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8931 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8932 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8933 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8934 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8935 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8937 BNX2X_ERR("Unsupported function index: %d\n", func);
8942 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8944 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8946 /* Flush all outstanding writes */
8949 /* Pretend to be function 0 */
8951 /* Flush the GRC transaction (in the chip) */
8952 new_val = REG_RD(bp, reg);
8954 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8959 /* From now we are in the "like-E1" mode */
8960 bnx2x_int_disable(bp);
8962 /* Flush all outstanding writes */
8965 /* Restore the original function settings */
8966 REG_WR(bp, reg, orig_func);
8967 new_val = REG_RD(bp, reg);
8968 if (new_val != orig_func) {
8969 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8970 orig_func, new_val);
8975 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8977 if (CHIP_IS_E1H(bp))
8978 bnx2x_undi_int_disable_e1h(bp, func);
8980 bnx2x_int_disable(bp);
8983 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8987 /* Check if there is any driver already loaded */
8988 val = REG_RD(bp, MISC_REG_UNPREPARED);
8990 /* Check if it is the UNDI driver
8991 * UNDI driver initializes CID offset for the normal doorbell to 0x7
8993 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8994 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8996 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8998 int func = BP_FUNC(bp);
9002 /* clear the UNDI indication */
9003 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9005 BNX2X_DEV_INFO("UNDI is active! resetting device\n");
9007 /* try unload UNDI on port 0 */
9010 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9011 DRV_MSG_SEQ_NUMBER_MASK);
9012 reset_code = bnx2x_fw_command(bp, reset_code);
9014 /* if UNDI is loaded on the other port */
9015 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9017 /* send "DONE" for previous unload */
9018 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9020 /* unload UNDI on port 1 */
9023 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9024 DRV_MSG_SEQ_NUMBER_MASK);
9025 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9027 bnx2x_fw_command(bp, reset_code);
9030 /* now it's safe to release the lock */
9031 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9033 bnx2x_undi_int_disable(bp, func);
9035 /* close input traffic and wait for it */
9036 /* Do not receive packets into the BRB */
9038 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9039 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9040 /* Do not direct received packets that are not for the MCP to the BRB */
9043 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9044 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9047 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9048 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9051 /* save NIG port swap info */
9052 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9053 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9056 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9059 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9061 /* take the NIG out of reset and restore swap values */
9063 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9064 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9065 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9066 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9068 /* send unload done to the MCP */
9069 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9071 /* restore our func and fw_seq */
9074 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9075 DRV_MSG_SEQ_NUMBER_MASK);
9078 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9082 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9084 u32 val, val2, val3, val4, id;
9087 /* Get the chip revision id and number. */
9088 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3
9088  * (see the packing sketch after this function) */
9089 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9090 id = ((val & 0xffff) << 16);
9091 val = REG_RD(bp, MISC_REG_CHIP_REV);
9092 id |= ((val & 0xf) << 12);
9093 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9094 id |= ((val & 0xff) << 4);
9095 val = REG_RD(bp, MISC_REG_BOND_ID);
9096 id |= (val & 0xf);
9097 bp->common.chip_id = id;
9098 bp->link_params.chip_id = bp->common.chip_id;
9099 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9101 val = (REG_RD(bp, 0x2874) & 0x55);
9102 if ((bp->common.chip_id & 0x1) ||
9103 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9104 bp->flags |= ONE_PORT_FLAG;
9105 BNX2X_DEV_INFO("single port device\n");
9108 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9109 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9110 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9111 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9112 bp->common.flash_size, bp->common.flash_size);
9114 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9115 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9116 bp->link_params.shmem_base = bp->common.shmem_base;
9117 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9118 bp->common.shmem_base, bp->common.shmem2_base);
9120 if (!bp->common.shmem_base ||
9121 (bp->common.shmem_base < 0xA0000) ||
9122 (bp->common.shmem_base >= 0xC0000)) {
9123 BNX2X_DEV_INFO("MCP not active\n");
9124 bp->flags |= NO_MCP_FLAG;
9128 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9129 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9130 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9131 BNX2X_ERR("BAD MCP validity signature\n");
9133 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9134 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9136 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9137 SHARED_HW_CFG_LED_MODE_MASK) >>
9138 SHARED_HW_CFG_LED_MODE_SHIFT);
9140 bp->link_params.feature_config_flags = 0;
9141 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9142 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9143 bp->link_params.feature_config_flags |=
9144 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9146 bp->link_params.feature_config_flags &=
9147 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9149 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9150 bp->common.bc_ver = val;
9151 BNX2X_DEV_INFO("bc_ver %X\n", val);
9152 if (val < BNX2X_BC_VER) {
9153 /* For now only warn; later we might need to enforce this. */
9155 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
9156 " please upgrade BC\n", BNX2X_BC_VER, val);
9158 bp->link_params.feature_config_flags |=
9159 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9160 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9162 if (BP_E1HVN(bp) == 0) {
9163 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9164 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9166 /* no WOL capability for E1HVN != 0 */
9167 bp->flags |= NO_WOL_FLAG;
9169 BNX2X_DEV_INFO("%sWoL capable\n",
9170 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9172 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9173 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9174 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9175 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9177 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
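/* Packing sketch for the chip id assembled above (illustrative, hypothetical
 * helper): num lands in bits 16-31, rev in 12-15, metal in 4-11 and bond_id
 * in 0-3, e.g. num 0x164e with rev/metal/bond all 0 yields 0x164e0000.
 */
static inline u32 example_pack_chip_id(u32 num, u32 rev, u32 metal, u32 bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}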
9180 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9183 int port = BP_PORT(bp);
9186 switch (switch_cfg) {
9188 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9191 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9192 switch (ext_phy_type) {
9193 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9194 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9197 bp->port.supported |= (SUPPORTED_10baseT_Half |
9198 SUPPORTED_10baseT_Full |
9199 SUPPORTED_100baseT_Half |
9200 SUPPORTED_100baseT_Full |
9201 SUPPORTED_1000baseT_Full |
9202 SUPPORTED_2500baseX_Full |
9207 SUPPORTED_Asym_Pause);
9210 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9211 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9214 bp->port.supported |= (SUPPORTED_10baseT_Half |
9215 SUPPORTED_10baseT_Full |
9216 SUPPORTED_100baseT_Half |
9217 SUPPORTED_100baseT_Full |
9218 SUPPORTED_1000baseT_Full |
9223 SUPPORTED_Asym_Pause);
9227 BNX2X_ERR("NVRAM config error. "
9228 "BAD SerDes ext_phy_config 0x%x\n",
9229 bp->link_params.ext_phy_config);
9233 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9235 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9238 case SWITCH_CFG_10G:
9239 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9242 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9243 switch (ext_phy_type) {
9244 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9245 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9248 bp->port.supported |= (SUPPORTED_10baseT_Half |
9249 SUPPORTED_10baseT_Full |
9250 SUPPORTED_100baseT_Half |
9251 SUPPORTED_100baseT_Full |
9252 SUPPORTED_1000baseT_Full |
9253 SUPPORTED_2500baseX_Full |
9254 SUPPORTED_10000baseT_Full |
9259 SUPPORTED_Asym_Pause);
9262 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9263 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9266 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9267 SUPPORTED_1000baseT_Full |
9271 SUPPORTED_Asym_Pause);
9274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9275 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9278 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9279 SUPPORTED_2500baseX_Full |
9280 SUPPORTED_1000baseT_Full |
9284 SUPPORTED_Asym_Pause);
9287 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9288 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9291 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9294 SUPPORTED_Asym_Pause);
9297 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9298 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9301 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9302 SUPPORTED_1000baseT_Full |
9305 SUPPORTED_Asym_Pause);
9308 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9309 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9312 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9313 SUPPORTED_1000baseT_Full |
9317 SUPPORTED_Asym_Pause);
9320 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9321 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9324 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9325 SUPPORTED_1000baseT_Full |
9329 SUPPORTED_Asym_Pause);
9332 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9333 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9336 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9340 SUPPORTED_Asym_Pause);
9343 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9344 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9347 bp->port.supported |= (SUPPORTED_10baseT_Half |
9348 SUPPORTED_10baseT_Full |
9349 SUPPORTED_100baseT_Half |
9350 SUPPORTED_100baseT_Full |
9351 SUPPORTED_1000baseT_Full |
9352 SUPPORTED_10000baseT_Full |
9356 SUPPORTED_Asym_Pause);
9359 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9360 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9361 bp->link_params.ext_phy_config);
9365 BNX2X_ERR("NVRAM config error. "
9366 "BAD XGXS ext_phy_config 0x%x\n",
9367 bp->link_params.ext_phy_config);
9371 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9373 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9378 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9379 bp->port.link_config);
9382 bp->link_params.phy_addr = bp->port.phy_addr;
9384 /* mask what we support according to speed_cap_mask */
9385 if (!(bp->link_params.speed_cap_mask &
9386 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9387 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9389 if (!(bp->link_params.speed_cap_mask &
9390 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9391 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9393 if (!(bp->link_params.speed_cap_mask &
9394 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9395 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9397 if (!(bp->link_params.speed_cap_mask &
9398 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9399 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9401 if (!(bp->link_params.speed_cap_mask &
9402 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9403 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9404 SUPPORTED_1000baseT_Full);
9406 if (!(bp->link_params.speed_cap_mask &
9407 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9408 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9410 if (!(bp->link_params.speed_cap_mask &
9411 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9412 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9414 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9417 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9419 bp->link_params.req_duplex = DUPLEX_FULL;
9421 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9422 case PORT_FEATURE_LINK_SPEED_AUTO:
9423 if (bp->port.supported & SUPPORTED_Autoneg) {
9424 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9425 bp->port.advertising = bp->port.supported;
9428 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9430 if ((ext_phy_type ==
9431 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9433 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9434 /* force 10G, no AN */
9435 bp->link_params.req_line_speed = SPEED_10000;
9436 bp->port.advertising =
9437 (ADVERTISED_10000baseT_Full |
9441 BNX2X_ERR("NVRAM config error. "
9442 "Invalid link_config 0x%x"
9443 " Autoneg not supported\n",
9444 bp->port.link_config);
9449 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9450 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9451 bp->link_params.req_line_speed = SPEED_10;
9452 bp->port.advertising = (ADVERTISED_10baseT_Full |
9455 BNX2X_ERR("NVRAM config error. "
9456 "Invalid link_config 0x%x"
9457 " speed_cap_mask 0x%x\n",
9458 bp->port.link_config,
9459 bp->link_params.speed_cap_mask);
9464 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9465 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9466 bp->link_params.req_line_speed = SPEED_10;
9467 bp->link_params.req_duplex = DUPLEX_HALF;
9468 bp->port.advertising = (ADVERTISED_10baseT_Half |
9471 BNX2X_ERR("NVRAM config error. "
9472 "Invalid link_config 0x%x"
9473 " speed_cap_mask 0x%x\n",
9474 bp->port.link_config,
9475 bp->link_params.speed_cap_mask);
9480 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9481 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9482 bp->link_params.req_line_speed = SPEED_100;
9483 bp->port.advertising = (ADVERTISED_100baseT_Full |
9486 BNX2X_ERR("NVRAM config error. "
9487 "Invalid link_config 0x%x"
9488 " speed_cap_mask 0x%x\n",
9489 bp->port.link_config,
9490 bp->link_params.speed_cap_mask);
9495 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9496 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9497 bp->link_params.req_line_speed = SPEED_100;
9498 bp->link_params.req_duplex = DUPLEX_HALF;
9499 bp->port.advertising = (ADVERTISED_100baseT_Half |
9502 BNX2X_ERR("NVRAM config error. "
9503 "Invalid link_config 0x%x"
9504 " speed_cap_mask 0x%x\n",
9505 bp->port.link_config,
9506 bp->link_params.speed_cap_mask);
9511 case PORT_FEATURE_LINK_SPEED_1G:
9512 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9513 bp->link_params.req_line_speed = SPEED_1000;
9514 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9517 BNX2X_ERR("NVRAM config error. "
9518 "Invalid link_config 0x%x"
9519 " speed_cap_mask 0x%x\n",
9520 bp->port.link_config,
9521 bp->link_params.speed_cap_mask);
9526 case PORT_FEATURE_LINK_SPEED_2_5G:
9527 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9528 bp->link_params.req_line_speed = SPEED_2500;
9529 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9532 BNX2X_ERR("NVRAM config error. "
9533 "Invalid link_config 0x%x"
9534 " speed_cap_mask 0x%x\n",
9535 bp->port.link_config,
9536 bp->link_params.speed_cap_mask);
9541 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9542 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9543 case PORT_FEATURE_LINK_SPEED_10G_KR:
9544 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9545 bp->link_params.req_line_speed = SPEED_10000;
9546 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9549 BNX2X_ERR("NVRAM config error. "
9550 "Invalid link_config 0x%x"
9551 " speed_cap_mask 0x%x\n",
9552 bp->port.link_config,
9553 bp->link_params.speed_cap_mask);
9559 BNX2X_ERR("NVRAM config error. "
9560 "BAD link speed link_config 0x%x\n",
9561 bp->port.link_config);
9562 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9563 bp->port.advertising = bp->port.supported;
9567 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9568 PORT_FEATURE_FLOW_CONTROL_MASK);
9569 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9570 !(bp->port.supported & SUPPORTED_Autoneg))
9571 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9573 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
9574 " advertising 0x%x\n",
9575 bp->link_params.req_line_speed,
9576 bp->link_params.req_duplex,
9577 bp->link_params.req_flow_ctrl, bp->port.advertising);
9580 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9582 mac_hi = cpu_to_be16(mac_hi);
9583 mac_lo = cpu_to_be32(mac_lo);
9584 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9585 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
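/* Usage sketch (illustrative, hypothetical values): the upper 16 bits and
 * lower 32 bits of the MAC arrive as two shmem words, and the cpu_to_be
 * conversions above emit the bytes in wire order on any host endianness.
 */
static void __devinit example_set_mac_buf_usage(u8 *mac)
{
	/* mac[0..5] becomes 00:10:18:01:23:45 */
	bnx2x_set_mac_buf(mac, 0x18012345, 0x0010);
}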
9588 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9590 int port = BP_PORT(bp);
9596 bp->link_params.bp = bp;
9597 bp->link_params.port = port;
9599 bp->link_params.lane_config =
9600 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9601 bp->link_params.ext_phy_config =
9603 dev_info.port_hw_config[port].external_phy_config);
9604 /* BCM8727_NOC => BCM8727 with no over-current */
9605 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9606 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9607 bp->link_params.ext_phy_config &=
9608 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9609 bp->link_params.ext_phy_config |=
9610 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9611 bp->link_params.feature_config_flags |=
9612 FEATURE_CONFIG_BCM8727_NOC;
9615 bp->link_params.speed_cap_mask =
9617 dev_info.port_hw_config[port].speed_capability_mask);
9619 bp->port.link_config =
9620 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9622 /* Get the 4-lane XGXS config, rx and tx */
9623 for (i = 0; i < 2; i++) {
9625 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9626 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9627 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9630 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9631 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9632 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9635 /* If the device is capable of WoL, set the default state according to the HW */
9638 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9639 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9640 (config & PORT_FEATURE_WOL_ENABLED));
9642 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9643 " speed_cap_mask 0x%08x link_config 0x%08x\n",
9644 bp->link_params.lane_config,
9645 bp->link_params.ext_phy_config,
9646 bp->link_params.speed_cap_mask, bp->port.link_config);
9648 bp->link_params.switch_cfg |= (bp->port.link_config &
9649 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9650 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9652 bnx2x_link_settings_requested(bp);
9655 * If connected directly, work with the internal PHY, otherwise, work
9656 * with the external PHY
9658 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9659 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9660 bp->mdio.prtad = bp->link_params.phy_addr;
9662 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9663 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9665 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9667 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9668 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9669 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9670 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9671 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9674 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9675 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9676 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9680 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9682 int func = BP_FUNC(bp);
9686 bnx2x_get_common_hwinfo(bp);
9690 if (CHIP_IS_E1H(bp)) {
9692 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9694 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9695 FUNC_MF_CFG_E1HOV_TAG_MASK);
9696 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9698 BNX2X_DEV_INFO("%s function mode\n",
9699 IS_E1HMF(bp) ? "multi" : "single");
9702 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9704 FUNC_MF_CFG_E1HOV_TAG_MASK);
9705 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9707 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9709 func, bp->e1hov, bp->e1hov);
9711 BNX2X_ERR("!!! No valid E1HOV for func %d,"
9712 " aborting\n", func);
9717 BNX2X_ERR("!!! VN %d in single function mode,"
9718 " aborting\n", BP_E1HVN(bp));
9724 if (!BP_NOMCP(bp)) {
9725 bnx2x_get_port_hwinfo(bp);
9727 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9728 DRV_MSG_SEQ_NUMBER_MASK);
9729 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9733 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9734 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9735 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9736 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9737 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9738 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9739 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9740 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9741 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9742 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9743 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9745 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9753 /* only supposed to happen on emulation/FPGA */
9754 BNX2X_ERR("warning: random MAC workaround active\n");
9755 random_ether_addr(bp->dev->dev_addr);
9756 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9762 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9764 int cnt, i, block_end, rodi;
9765 char vpd_data[BNX2X_VPD_LEN+1];
9766 char str_id_reg[VENDOR_ID_LEN+1];
9767 char str_id_cap[VENDOR_ID_LEN+1];
9770 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9771 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9773 if (cnt < BNX2X_VPD_LEN)
9776 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9777 PCI_VPD_LRDT_RO_DATA);
9782 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9783 pci_vpd_lrdt_size(&vpd_data[i]);
9785 i += PCI_VPD_LRDT_TAG_SIZE;
9787 if (block_end > BNX2X_VPD_LEN)
9790 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9791 PCI_VPD_RO_KEYWORD_MFR_ID);
9795 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9797 if (len != VENDOR_ID_LEN)
9800 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9802 /* vendor specific info */
9803 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9804 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9805 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9806 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9808 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9809 PCI_VPD_RO_KEYWORD_VENDOR0);
9811 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9813 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9815 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9816 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9817 bp->fw_ver[len] = ' ';
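/* Layout note for the VPD walk above (illustrative, hypothetical helper):
 * a large-resource header is one tag byte plus a 16-bit little-endian
 * length, which is what pci_vpd_lrdt_size() evaluates, so the read-only
 * block ends at tag offset + header size + payload length.
 */
static inline int example_vpd_ro_block_end(const u8 *vpd, int i)
{
	return i + PCI_VPD_LRDT_TAG_SIZE + (vpd[i + 1] | (vpd[i + 2] << 8));
}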
9826 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9828 int func = BP_FUNC(bp);
9832 /* Disable interrupt handling until HW is initialized */
9833 atomic_set(&bp->intr_sem, 1);
9834 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9836 mutex_init(&bp->port.phy_mutex);
9837 mutex_init(&bp->fw_mb_mutex);
9839 mutex_init(&bp->cnic_mutex);
9842 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9843 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9845 rc = bnx2x_get_hwinfo(bp);
9847 bnx2x_read_fwinfo(bp);
9848 /* need to reset chip if undi was active */
9850 bnx2x_undi_unload(bp);
9852 if (CHIP_REV_IS_FPGA(bp))
9853 pr_err("FPGA detected\n");
9855 if (BP_NOMCP(bp) && (func == 0))
9856 pr_err("MCP disabled, must load devices in order!\n");
9858 /* Set multi queue mode */
9859 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9860 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9861 pr_err("Multi-queue disabled since the requested int_mode is not MSI-X\n");
9862 multi_mode = ETH_RSS_MODE_DISABLED;
9864 bp->multi_mode = multi_mode;
9867 bp->dev->features |= NETIF_F_GRO;
9871 bp->flags &= ~TPA_ENABLE_FLAG;
9872 bp->dev->features &= ~NETIF_F_LRO;
9874 bp->flags |= TPA_ENABLE_FLAG;
9875 bp->dev->features |= NETIF_F_LRO;
9879 bp->dropless_fc = 0;
9881 bp->dropless_fc = dropless_fc;
9885 bp->tx_ring_size = MAX_TX_AVAIL;
9886 bp->rx_ring_size = MAX_RX_AVAIL;
9890 /* make sure that the numbers are in the right granularity */
9891 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9892 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9894 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9895 bp->current_interval = (poll ? poll : timer_interval);
9897 init_timer(&bp->timer);
9898 bp->timer.expires = jiffies + bp->current_interval;
9899 bp->timer.data = (unsigned long) bp;
9900 bp->timer.function = bnx2x_timer;
9906 * ethtool service functions
9909 /* All ethtool functions called with rtnl_lock */
9911 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9913 struct bnx2x *bp = netdev_priv(dev);
9915 cmd->supported = bp->port.supported;
9916 cmd->advertising = bp->port.advertising;
9918 if ((bp->state == BNX2X_STATE_OPEN) &&
9919 !(bp->flags & MF_FUNC_DIS) &&
9920 (bp->link_vars.link_up)) {
9921 cmd->speed = bp->link_vars.line_speed;
9922 cmd->duplex = bp->link_vars.duplex;
9927 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9928 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9929 if (vn_max_rate < cmd->speed)
9930 cmd->speed = vn_max_rate;
9937 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9939 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9941 switch (ext_phy_type) {
9942 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9943 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9945 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9946 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9947 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9948 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9949 cmd->port = PORT_FIBRE;
9952 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9953 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9954 cmd->port = PORT_TP;
9957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9958 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9959 bp->link_params.ext_phy_config);
9963 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9964 bp->link_params.ext_phy_config);
9968 cmd->port = PORT_TP;
9970 cmd->phy_address = bp->mdio.prtad;
9971 cmd->transceiver = XCVR_INTERNAL;
9973 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9974 cmd->autoneg = AUTONEG_ENABLE;
9976 cmd->autoneg = AUTONEG_DISABLE;
9981 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9982 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9983 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9984 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9985 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9986 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9987 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9992 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9994 struct bnx2x *bp = netdev_priv(dev);
10000 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10001 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10002 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10003 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10004 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10005 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10006 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10008 if (cmd->autoneg == AUTONEG_ENABLE) {
10009 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10010 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10014 /* advertise the requested speed and duplex if supported */
10015 cmd->advertising &= bp->port.supported;
10017 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10018 bp->link_params.req_duplex = DUPLEX_FULL;
10019 bp->port.advertising |= (ADVERTISED_Autoneg |
10022 } else { /* forced speed */
10023 /* advertise the requested speed and duplex if supported */
10024 switch (cmd->speed) {
10026 if (cmd->duplex == DUPLEX_FULL) {
10027 if (!(bp->port.supported &
10028 SUPPORTED_10baseT_Full)) {
10030 "10M full not supported\n");
10034 advertising = (ADVERTISED_10baseT_Full |
10037 if (!(bp->port.supported &
10038 SUPPORTED_10baseT_Half)) {
10040 "10M half not supported\n");
10044 advertising = (ADVERTISED_10baseT_Half |
10050 if (cmd->duplex == DUPLEX_FULL) {
10051 if (!(bp->port.supported &
10052 SUPPORTED_100baseT_Full)) {
10054 "100M full not supported\n");
10058 advertising = (ADVERTISED_100baseT_Full |
10061 if (!(bp->port.supported &
10062 SUPPORTED_100baseT_Half)) {
10064 "100M half not supported\n");
10068 advertising = (ADVERTISED_100baseT_Half |
10074 if (cmd->duplex != DUPLEX_FULL) {
10075 DP(NETIF_MSG_LINK, "1G half not supported\n");
10079 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10080 DP(NETIF_MSG_LINK, "1G full not supported\n");
10084 advertising = (ADVERTISED_1000baseT_Full |
10089 if (cmd->duplex != DUPLEX_FULL) {
10091 "2.5G half not supported\n");
10095 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10097 "2.5G full not supported\n");
10101 advertising = (ADVERTISED_2500baseX_Full |
10106 if (cmd->duplex != DUPLEX_FULL) {
10107 DP(NETIF_MSG_LINK, "10G half not supported\n");
10111 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10112 DP(NETIF_MSG_LINK, "10G full not supported\n");
10116 advertising = (ADVERTISED_10000baseT_Full |
10121 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10125 bp->link_params.req_line_speed = cmd->speed;
10126 bp->link_params.req_duplex = cmd->duplex;
10127 bp->port.advertising = advertising;
10130 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10131 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10132 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10133 bp->port.advertising);
10135 if (netif_running(dev)) {
10136 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10137 bnx2x_link_set(bp);
10143 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10144 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10146 static int bnx2x_get_regs_len(struct net_device *dev)
10148 struct bnx2x *bp = netdev_priv(dev);
10149 int regdump_len = 0;
10152 if (CHIP_IS_E1(bp)) {
10153 for (i = 0; i < REGS_COUNT; i++)
10154 if (IS_E1_ONLINE(reg_addrs[i].info))
10155 regdump_len += reg_addrs[i].size;
10157 for (i = 0; i < WREGS_COUNT_E1; i++)
10158 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10159 regdump_len += wreg_addrs_e1[i].size *
10160 (1 + wreg_addrs_e1[i].read_regs_count);
10163 for (i = 0; i < REGS_COUNT; i++)
10164 if (IS_E1H_ONLINE(reg_addrs[i].info))
10165 regdump_len += reg_addrs[i].size;
10167 for (i = 0; i < WREGS_COUNT_E1H; i++)
10168 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10169 regdump_len += wreg_addrs_e1h[i].size *
10170 (1 + wreg_addrs_e1h[i].read_regs_count);
10173 regdump_len += sizeof(struct dump_hdr);
10175 return regdump_len;
10178 static void bnx2x_get_regs(struct net_device *dev,
10179 struct ethtool_regs *regs, void *_p)
10182 struct bnx2x *bp = netdev_priv(dev);
10183 struct dump_hdr dump_hdr = {0};
10186 memset(p, 0, regs->len);
10188 if (!netif_running(bp->dev))
10191 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10192 dump_hdr.dump_sign = dump_sign_all;
10193 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10194 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10195 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10196 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10197 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10199 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10200 p += dump_hdr.hdr_size + 1;
10202 if (CHIP_IS_E1(bp)) {
10203 for (i = 0; i < REGS_COUNT; i++)
10204 if (IS_E1_ONLINE(reg_addrs[i].info))
10205 for (j = 0; j < reg_addrs[i].size; j++)
10207 reg_addrs[i].addr + j*4);
10210 for (i = 0; i < REGS_COUNT; i++)
10211 if (IS_E1H_ONLINE(reg_addrs[i].info))
10212 for (j = 0; j < reg_addrs[i].size; j++)
10214 reg_addrs[i].addr + j*4);
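/* Note on the cursor arithmetic above: assuming p is a u32 cursor into the
 * dump buffer (it is filled dword-by-dword via REG_RD), hdr_size is stored
 * in dwords minus one, so advancing by hdr_size + 1 elements skips exactly
 * sizeof(struct dump_hdr) bytes. Illustrative, hypothetical check:
 */
static inline int example_dump_hdr_stride_ok(void)
{
	u32 hdr_size = (sizeof(struct dump_hdr) / 4) - 1;

	return (hdr_size + 1) * 4 == sizeof(struct dump_hdr);
}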
10218 #define PHY_FW_VER_LEN 10
10220 static void bnx2x_get_drvinfo(struct net_device *dev,
10221 struct ethtool_drvinfo *info)
10223 struct bnx2x *bp = netdev_priv(dev);
10224 u8 phy_fw_ver[PHY_FW_VER_LEN];
10226 strcpy(info->driver, DRV_MODULE_NAME);
10227 strcpy(info->version, DRV_MODULE_VERSION);
10229 phy_fw_ver[0] = '\0';
10230 if (bp->port.pmf) {
10231 bnx2x_acquire_phy_lock(bp);
10232 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10233 (bp->state != BNX2X_STATE_CLOSED),
10234 phy_fw_ver, PHY_FW_VER_LEN);
10235 bnx2x_release_phy_lock(bp);
10238 strncpy(info->fw_version, bp->fw_ver, 32);
10239 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10241 (bp->common.bc_ver & 0xff0000) >> 16,
10242 (bp->common.bc_ver & 0xff00) >> 8,
10243 (bp->common.bc_ver & 0xff),
10244 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10245 strcpy(info->bus_info, pci_name(bp->pdev));
10246 info->n_stats = BNX2X_NUM_STATS;
10247 info->testinfo_len = BNX2X_NUM_TESTS;
10248 info->eedump_len = bp->common.flash_size;
10249 info->regdump_len = bnx2x_get_regs_len(dev);
10252 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10254 struct bnx2x *bp = netdev_priv(dev);
10256 if (bp->flags & NO_WOL_FLAG) {
10257 wol->supported = 0;
10260 wol->supported = WAKE_MAGIC;
10262 wol->wolopts = WAKE_MAGIC;
10266 memset(&wol->sopass, 0, sizeof(wol->sopass));
10269 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10271 struct bnx2x *bp = netdev_priv(dev);
10273 if (wol->wolopts & ~WAKE_MAGIC)
10276 if (wol->wolopts & WAKE_MAGIC) {
10277 if (bp->flags & NO_WOL_FLAG)
10287 static u32 bnx2x_get_msglevel(struct net_device *dev)
10289 struct bnx2x *bp = netdev_priv(dev);
10291 return bp->msg_enable;
10294 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10296 struct bnx2x *bp = netdev_priv(dev);
10298 if (capable(CAP_NET_ADMIN))
10299 bp->msg_enable = level;
10302 static int bnx2x_nway_reset(struct net_device *dev)
10304 struct bnx2x *bp = netdev_priv(dev);
10309 if (netif_running(dev)) {
10310 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10311 bnx2x_link_set(bp);
10317 static u32 bnx2x_get_link(struct net_device *dev)
10319 struct bnx2x *bp = netdev_priv(dev);
10321 if (bp->flags & MF_FUNC_DIS)
10324 return bp->link_vars.link_up;
10327 static int bnx2x_get_eeprom_len(struct net_device *dev)
10329 struct bnx2x *bp = netdev_priv(dev);
10331 return bp->common.flash_size;
10334 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10336 int port = BP_PORT(bp);
10340 /* adjust timeout for emulation/FPGA */
10341 count = NVRAM_TIMEOUT_COUNT;
10342 if (CHIP_REV_IS_SLOW(bp))
10345 /* request access to nvram interface */
10346 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10347 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10349 for (i = 0; i < count*10; i++) {
10350 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10351 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10357 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10358 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10365 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10367 int port = BP_PORT(bp);
10371 /* adjust timeout for emulation/FPGA */
10372 count = NVRAM_TIMEOUT_COUNT;
10373 if (CHIP_REV_IS_SLOW(bp))
10376 /* relinquish nvram interface */
10377 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10378 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10380 for (i = 0; i < count*10; i++) {
10381 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10382 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10388 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10389 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10396 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10400 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10402 /* enable both bits, even on read */
10403 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10404 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10405 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10408 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10412 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10414 /* disable both bits, even after read */
10415 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10416 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10417 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10420 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10426 /* build the command word */
10427 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10429 /* need to clear DONE bit separately */
10430 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10432 /* address of the NVRAM to read from */
10433 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10434 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10436 /* issue a read command */
10437 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10439 /* adjust timeout for emulation/FPGA */
10440 count = NVRAM_TIMEOUT_COUNT;
10441 if (CHIP_REV_IS_SLOW(bp))
10444 /* wait for completion */
10447 for (i = 0; i < count; i++) {
10449 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10451 if (val & MCPR_NVM_COMMAND_DONE) {
10452 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10453 /* we read nvram data in cpu order
10454 * but ethtool sees it as an array of bytes
10455 * converting to big-endian will do the work */
10456 *ret_val = cpu_to_be32(val);
10465 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10472 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10474 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10479 if (offset + buf_size > bp->common.flash_size) {
10480 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10481 " buf_size (0x%x) > flash_size (0x%x)\n",
10482 offset, buf_size, bp->common.flash_size);
10486 /* request access to nvram interface */
10487 rc = bnx2x_acquire_nvram_lock(bp);
10491 /* enable access to nvram interface */
10492 bnx2x_enable_nvram_access(bp);
10494 /* read the first word(s) */
10495 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10496 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10497 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10498 memcpy(ret_buf, &val, 4);
10500 /* advance to the next dword */
10501 offset += sizeof(u32);
10502 ret_buf += sizeof(u32);
10503 buf_size -= sizeof(u32);
10508 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10509 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10510 memcpy(ret_buf, &val, 4);
10513 /* disable access to nvram interface */
10514 bnx2x_disable_nvram_access(bp);
10515 bnx2x_release_nvram_lock(bp);
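/* Usage sketch (illustrative, hypothetical helper): read one dword from
 * NVRAM through bnx2x_nvram_read(), which expects a 4-byte aligned offset
 * and length, and convert the big-endian result back to CPU order.
 */
static int example_read_nvram_dword(struct bnx2x *bp, u32 offset, u32 *out)
{
	__be32 raw;
	int rc;

	rc = bnx2x_nvram_read(bp, offset, (u8 *)&raw, sizeof(raw));
	if (!rc)
		*out = be32_to_cpu(raw);
	return rc;
}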
10520 static int bnx2x_get_eeprom(struct net_device *dev,
10521 struct ethtool_eeprom *eeprom, u8 *eebuf)
10523 struct bnx2x *bp = netdev_priv(dev);
10526 if (!netif_running(dev))
10529 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10530 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10531 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10532 eeprom->len, eeprom->len);
10534 /* parameters already validated in ethtool_get_eeprom */
10536 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10541 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10546 /* build the command word */
10547 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10549 /* need to clear DONE bit separately */
10550 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10552 /* write the data */
10553 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10555 /* address of the NVRAM to write to */
10556 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10557 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10559 /* issue the write command */
10560 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10562 /* adjust timeout for emulation/FPGA */
10563 count = NVRAM_TIMEOUT_COUNT;
10564 if (CHIP_REV_IS_SLOW(bp))
10567 /* wait for completion */
10569 for (i = 0; i < count; i++) {
10571 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10572 if (val & MCPR_NVM_COMMAND_DONE) {
10581 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
10583 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10591 if (offset + buf_size > bp->common.flash_size) {
10592 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10593 " buf_size (0x%x) > flash_size (0x%x)\n",
10594 offset, buf_size, bp->common.flash_size);
10598 /* request access to nvram interface */
10599 rc = bnx2x_acquire_nvram_lock(bp);
10603 /* enable access to nvram interface */
10604 bnx2x_enable_nvram_access(bp);
10606 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10607 align_offset = (offset & ~0x03);
10608 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10611 val &= ~(0xff << BYTE_OFFSET(offset));
10612 val |= (*data_buf << BYTE_OFFSET(offset));
10614 /* nvram data is returned as an array of bytes
10615 * convert it back to cpu order */
10616 val = be32_to_cpu(val);
10618 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10622 /* disable access to nvram interface */
10623 bnx2x_disable_nvram_access(bp);
10624 bnx2x_release_nvram_lock(bp);
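/* Worked example of the single-byte merge above (illustrative, hypothetical
 * helper): for offset 0x103, BYTE_OFFSET(0x103) = 8 * 3 = 24, so the new
 * byte replaces bits 31:24 of the dword read from the aligned offset 0x100.
 */
static inline u32 example_merge_nvram_byte(u32 dword, u8 byte, u32 offset)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));
	dword |= ((u32)byte << BYTE_OFFSET(offset));
	return dword;
}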
10629 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10635 u32 written_so_far;
10637 if (buf_size == 1) /* ethtool */
10638 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10640 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10642 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10647 if (offset + buf_size > bp->common.flash_size) {
10648 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10649 " buf_size (0x%x) > flash_size (0x%x)\n",
10650 offset, buf_size, bp->common.flash_size);
10654 /* request access to nvram interface */
10655 rc = bnx2x_acquire_nvram_lock(bp);
10659 /* enable access to nvram interface */
10660 bnx2x_enable_nvram_access(bp);
10662 written_so_far = 0;
10663 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10664 while ((written_so_far < buf_size) && (rc == 0)) {
10665 if (written_so_far == (buf_size - sizeof(u32)))
10666 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10667 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10668 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10669 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10670 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10672 memcpy(&val, data_buf, 4);
10674 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10676 /* advance to the next dword */
10677 offset += sizeof(u32);
10678 data_buf += sizeof(u32);
10679 written_so_far += sizeof(u32);
10683 /* disable access to nvram interface */
10684 bnx2x_disable_nvram_access(bp);
10685 bnx2x_release_nvram_lock(bp);
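/* Sketch of the command-flag selection in the write loop above
 * (illustrative, hypothetical helper): the NVRAM state machine must not see
 * a burst crossing a page, so LAST is raised on the final dword of the
 * buffer or of a page, and FIRST on the first dword of the buffer or of a
 * page.
 */
static inline u32 example_nvram_wr_flags(u32 offset, u32 written, u32 buf_size)
{
	u32 flags = 0;

	if (written == 0 || (offset % NVRAM_PAGE_SIZE) == 0)
		flags |= MCPR_NVM_COMMAND_FIRST;
	if (written == buf_size - sizeof(u32) ||
	    ((offset + 4) % NVRAM_PAGE_SIZE) == 0)
		flags |= MCPR_NVM_COMMAND_LAST;
	return flags;
}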
10690 static int bnx2x_set_eeprom(struct net_device *dev,
10691 struct ethtool_eeprom *eeprom, u8 *eebuf)
10693 struct bnx2x *bp = netdev_priv(dev);
10694 int port = BP_PORT(bp);
10697 if (!netif_running(dev))
10700 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10701 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10702 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10703 eeprom->len, eeprom->len);
10705 /* parameters already validated in ethtool_set_eeprom */
10707 /* PHY eeprom can be accessed only by the PMF */
10708 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10712 if (eeprom->magic == 0x50485950) {
10713 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10714 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10716 bnx2x_acquire_phy_lock(bp);
10717 rc |= bnx2x_link_reset(&bp->link_params,
10718 &bp->link_vars, 0);
10719 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10720 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10721 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10722 MISC_REGISTERS_GPIO_HIGH, port);
10723 bnx2x_release_phy_lock(bp);
10724 bnx2x_link_report(bp);
10726 } else if (eeprom->magic == 0x50485952) {
10727 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10728 if (bp->state == BNX2X_STATE_OPEN) {
10729 bnx2x_acquire_phy_lock(bp);
10730 rc |= bnx2x_link_reset(&bp->link_params,
10731 &bp->link_vars, 1);
10733 rc |= bnx2x_phy_init(&bp->link_params,
10735 bnx2x_release_phy_lock(bp);
10736 bnx2x_calc_fc_adv(bp);
10738 } else if (eeprom->magic == 0x53985943) {
10739 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10740 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10741 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10743 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10745 /* take the DSP out of download mode */
10746 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10747 MISC_REGISTERS_GPIO_LOW, port);
10749 bnx2x_acquire_phy_lock(bp);
10751 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10753 /* wait 0.5 sec to allow it to run */
10755 bnx2x_ext_phy_hw_reset(bp, port);
10757 bnx2x_release_phy_lock(bp);
10760 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10765 static int bnx2x_get_coalesce(struct net_device *dev,
10766 struct ethtool_coalesce *coal)
10768 struct bnx2x *bp = netdev_priv(dev);
10770 memset(coal, 0, sizeof(struct ethtool_coalesce));
10772 coal->rx_coalesce_usecs = bp->rx_ticks;
10773 coal->tx_coalesce_usecs = bp->tx_ticks;
10778 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
10779 static int bnx2x_set_coalesce(struct net_device *dev,
10780 struct ethtool_coalesce *coal)
10782 struct bnx2x *bp = netdev_priv(dev);
10784 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
10785 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
10786 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
10788 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
10789 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
10790 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
10792 if (netif_running(dev))
10793 bnx2x_update_coalesce(bp);
10798 static void bnx2x_get_ringparam(struct net_device *dev,
10799 struct ethtool_ringparam *ering)
10801 struct bnx2x *bp = netdev_priv(dev);
10803 ering->rx_max_pending = MAX_RX_AVAIL;
10804 ering->rx_mini_max_pending = 0;
10805 ering->rx_jumbo_max_pending = 0;
10807 ering->rx_pending = bp->rx_ring_size;
10808 ering->rx_mini_pending = 0;
10809 ering->rx_jumbo_pending = 0;
10811 ering->tx_max_pending = MAX_TX_AVAIL;
10812 ering->tx_pending = bp->tx_ring_size;
10815 static int bnx2x_set_ringparam(struct net_device *dev,
10816 struct ethtool_ringparam *ering)
10818 struct bnx2x *bp = netdev_priv(dev);
10821 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10822 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10826 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10827 (ering->tx_pending > MAX_TX_AVAIL) ||
10828 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10831 bp->rx_ring_size = ering->rx_pending;
10832 bp->tx_ring_size = ering->tx_pending;
10834 if (netif_running(dev)) {
10835 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10836 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10842 static void bnx2x_get_pauseparam(struct net_device *dev,
10843 struct ethtool_pauseparam *epause)
10845 struct bnx2x *bp = netdev_priv(dev);
10847 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10848 BNX2X_FLOW_CTRL_AUTO) &&
10849 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10851 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10852 BNX2X_FLOW_CTRL_RX);
10853 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10854 BNX2X_FLOW_CTRL_TX);
10856 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10857 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10858 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10861 static int bnx2x_set_pauseparam(struct net_device *dev,
10862 struct ethtool_pauseparam *epause)
10864 struct bnx2x *bp = netdev_priv(dev);
10869 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10870 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10871 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10873 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10875 if (epause->rx_pause)
10876 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10878 if (epause->tx_pause)
10879 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10881 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10882 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10884 if (epause->autoneg) {
10885 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10886 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10890 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10891 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10895 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10897 if (netif_running(dev)) {
10898 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10899 bnx2x_link_set(bp);
10905 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10907 struct bnx2x *bp = netdev_priv(dev);
10911 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10912 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10916 /* TPA requires Rx CSUM offloading */
10917 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10918 if (!disable_tpa) {
10919 if (!(dev->features & NETIF_F_LRO)) {
10920 dev->features |= NETIF_F_LRO;
10921 bp->flags |= TPA_ENABLE_FLAG;
10926 } else if (dev->features & NETIF_F_LRO) {
10927 dev->features &= ~NETIF_F_LRO;
10928 bp->flags &= ~TPA_ENABLE_FLAG;
10932 if (changed && netif_running(dev)) {
10933 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10934 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10940 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10942 struct bnx2x *bp = netdev_priv(dev);
10944 return bp->rx_csum;
10947 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10949 struct bnx2x *bp = netdev_priv(dev);
10952 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10953 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10957 bp->rx_csum = data;
10959 /* Disable TPA when Rx CSUM is disabled; otherwise all
10960 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10962 u32 flags = ethtool_op_get_flags(dev);
10964 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10970 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10973 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10974 dev->features |= NETIF_F_TSO6;
10976 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10977 dev->features &= ~NETIF_F_TSO6;
10983 static const struct {
10984 char string[ETH_GSTRING_LEN];
10985 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10986 { "register_test (offline)" },
10987 { "memory_test (offline)" },
10988 { "loopback_test (offline)" },
10989 { "nvram_test (online)" },
10990 { "interrupt_test (online)" },
10991 { "link_test (online)" },
10992 { "idle check (online)" }
10995 static int bnx2x_test_registers(struct bnx2x *bp)
10997 int idx, i, rc = -ENODEV;
10999 int port = BP_PORT(bp);
11000 static const struct {
11005 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11006 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11007 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11008 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11009 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11010 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11011 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11012 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11013 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11014 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11015 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11016 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11017 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11018 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11019 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11020 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11021 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11022 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
11023 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
11024 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11025 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
11026 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11027 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11028 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11029 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11030 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11031 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11032 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11033 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
11034 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11035 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
11036 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11037 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11038 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11039 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11040 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11041 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11043 { 0xffffffff, 0, 0x00000000 }
11046 if (!netif_running(bp->dev))
11049 /* Repeat the test twice:
11050 First by writing 0x00000000, second by writing 0xffffffff */
11051 for (idx = 0; idx < 2; idx++) {
11058 wr_val = 0xffffffff;
11062 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11063 u32 offset, mask, save_val, val;
11065 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11066 mask = reg_tbl[i].mask;
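			/* Save the original value, write the test pattern,
			 * read it back and restore; only the implemented
			 * bits covered by the mask are compared.
			 */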
11068 save_val = REG_RD(bp, offset);
11070 REG_WR(bp, offset, wr_val);
11071 val = REG_RD(bp, offset);
11073 /* Restore the original register's value */
11074 REG_WR(bp, offset, save_val);
			/* verify that the value is as expected */
11077 if ((val & mask) != (wr_val & mask))
11078 goto test_reg_exit;
11088 static int bnx2x_test_memory(struct bnx2x *bp)
11090 int i, j, rc = -ENODEV;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
11096 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11097 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11098 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11099 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11100 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11101 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11102 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
11112 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11113 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11114 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11115 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11116 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11117 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11119 { NULL, 0xffffffff, 0, 0 }
11122 if (!netif_running(bp->dev))
11125 /* Go through all the memories */
11126 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11127 for (j = 0; j < mem_tbl[i].size; j++)
11128 REG_RD(bp, mem_tbl[i].offset + j*4);
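	/* Reading every word above exercises the RAMs so that any latent
	 * parity error shows up in the per-block parity status registers
	 * checked below.
	 */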
11130 /* Check the parity status */
11131 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11132 val = REG_RD(bp, prty_tbl[i].offset);
11133 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11134 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11136 "%s is 0x%x\n", prty_tbl[i].name, val);
11137 goto test_mem_exit;
11147 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11152 while (bnx2x_link_test(bp) && cnt--)
11156 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11158 unsigned int pkt_size, num_pkts, i;
11159 struct sk_buff *skb;
11160 unsigned char *packet;
11161 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11162 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11163 u16 tx_start_idx, tx_idx;
11164 u16 rx_start_idx, rx_idx;
11165 u16 pkt_prod, bd_prod;
11166 struct sw_tx_bd *tx_buf;
11167 struct eth_tx_start_bd *tx_start_bd;
11168 struct eth_tx_parse_bd *pbd = NULL;
11169 dma_addr_t mapping;
11170 union eth_rx_cqe *cqe;
11172 struct sw_rx_bd *rx_buf;
11176 /* check the loopback mode */
11177 switch (loopback_mode) {
11178 case BNX2X_PHY_LOOPBACK:
11179 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11182 case BNX2X_MAC_LOOPBACK:
11183 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11184 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11190 /* prepare the loopback packet */
11191 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11192 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11193 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11196 goto test_loopback_exit;
11198 packet = skb_put(skb, pkt_size);
11199 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11200 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11201 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11202 for (i = ETH_HLEN; i < pkt_size; i++)
11203 packet[i] = (unsigned char) (i & 0xff);
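	/* The payload is a simple ramp pattern (i & 0xff) so the Rx side
	 * below can verify the looped-back frame byte by byte.
	 */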
11205 /* send the loopback packet */
11207 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11208 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11210 pkt_prod = fp_tx->tx_pkt_prod++;
11211 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11212 tx_buf->first_bd = fp_tx->tx_bd_prod;
11216 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11217 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11218 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11219 skb_headlen(skb), DMA_TO_DEVICE);
11220 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11221 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11222 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11223 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11224 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11225 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11226 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11227 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11229 /* turn on parsing and get a BD */
11230 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11231 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11233 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
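	/* The test frame occupies exactly two BDs - the start BD above and
	 * this zeroed parsing BD - which is why nbd was set to 2 and the
	 * producers below advance by 2.
	 */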
11237 fp_tx->tx_db.data.prod += 2;
11239 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11244 fp_tx->tx_bd_prod += 2; /* start + pbd */
11248 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11249 if (tx_idx != tx_start_idx + num_pkts)
11250 goto test_loopback_exit;
11252 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11253 if (rx_idx != rx_start_idx + num_pkts)
11254 goto test_loopback_exit;
11256 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11257 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11258 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11259 goto test_loopback_rx_exit;
11261 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11262 if (len != pkt_size)
11263 goto test_loopback_rx_exit;
11265 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11267 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11268 for (i = ETH_HLEN; i < pkt_size; i++)
11269 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11270 goto test_loopback_rx_exit;
11274 test_loopback_rx_exit:
11276 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11277 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11278 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11279 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11281 /* Update producers */
11282 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11283 fp_rx->rx_sge_prod);
11285 test_loopback_exit:
11286 bp->link_params.loopback_mode = LOOPBACK_NONE;
11291 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11295 if (!netif_running(bp->dev))
11296 return BNX2X_LOOPBACK_FAILED;
11298 bnx2x_netif_stop(bp, 1);
11299 bnx2x_acquire_phy_lock(bp);
11301 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11303 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11304 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11307 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11309 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11310 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11313 bnx2x_release_phy_lock(bp);
11314 bnx2x_netif_start(bp);
11319 #define CRC32_RESIDUAL 0xdebb20e3
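/* 0xdebb20e3 is the standard CRC-32 residual: running ether_crc_le() over
 * a block that ends with its own little-endian CRC-32 yields this constant,
 * so each nvram_tbl region below can be validated without knowing where
 * its CRC is stored.
 */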
11321 static int bnx2x_test_nvram(struct bnx2x *bp)
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
11327 { 0, 0x14 }, /* bootstrap */
11328 { 0x14, 0xec }, /* dir */
11329 { 0x100, 0x350 }, /* manuf_info */
11330 { 0x450, 0xf0 }, /* feature_info */
11331 { 0x640, 0x64 }, /* upgrade_key_info */
11333 { 0x708, 0x70 }, /* manuf_key_info */
11337 __be32 buf[0x350 / 4];
11338 u8 *data = (u8 *)buf;
11342 rc = bnx2x_nvram_read(bp, 0, data, 4);
11344 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11345 goto test_nvram_exit;
11348 magic = be32_to_cpu(buf[0]);
11349 if (magic != 0x669955aa) {
11350 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11352 goto test_nvram_exit;
11355 for (i = 0; nvram_tbl[i].size; i++) {
11357 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11358 nvram_tbl[i].size);
11360 DP(NETIF_MSG_PROBE,
11361 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11362 goto test_nvram_exit;
11365 crc = ether_crc_le(nvram_tbl[i].size, data);
11366 if (crc != CRC32_RESIDUAL) {
11367 DP(NETIF_MSG_PROBE,
11368 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11370 goto test_nvram_exit;
11378 static int bnx2x_test_intr(struct bnx2x *bp)
11380 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11383 if (!netif_running(bp->dev))
11386 config->hdr.length = 0;
11387 if (CHIP_IS_E1(bp))
11388 /* use last unicast entries */
11389 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11391 config->hdr.offset = BP_FUNC(bp);
11392 config->hdr.client_id = bp->fp->cl_id;
11393 config->hdr.reserved1 = 0;
11395 bp->set_mac_pending++;
11397 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11398 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11399 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
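	/* A harmless SET_MAC ramrod was just posted; poll up to ~100ms for
	 * the firmware completion to clear set_mac_pending, which proves
	 * the slowpath interrupt path is alive.
	 */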
11401 for (i = 0; i < 10; i++) {
11402 if (!bp->set_mac_pending)
11405 msleep_interruptible(10);
11414 static void bnx2x_self_test(struct net_device *dev,
11415 struct ethtool_test *etest, u64 *buf)
11417 struct bnx2x *bp = netdev_priv(dev);
11419 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11420 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11421 etest->flags |= ETH_TEST_FL_FAILED;
11425 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11427 if (!netif_running(dev))
11430 /* offline tests are not supported in MF mode */
11432 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11434 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11435 int port = BP_PORT(bp);
11439 /* save current value of input enable for TX port IF */
11440 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11441 /* disable input for TX port IF */
11442 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11444 link_up = (bnx2x_link_test(bp) == 0);
11445 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11446 bnx2x_nic_load(bp, LOAD_DIAG);
11447 /* wait until link state is restored */
11448 bnx2x_wait_for_link(bp, link_up);
11450 if (bnx2x_test_registers(bp) != 0) {
11452 etest->flags |= ETH_TEST_FL_FAILED;
11454 if (bnx2x_test_memory(bp) != 0) {
11456 etest->flags |= ETH_TEST_FL_FAILED;
11458 buf[2] = bnx2x_test_loopback(bp, link_up);
11460 etest->flags |= ETH_TEST_FL_FAILED;
11462 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11464 /* restore input for TX port IF */
11465 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11467 bnx2x_nic_load(bp, LOAD_NORMAL);
11468 /* wait until link state is restored */
11469 bnx2x_wait_for_link(bp, link_up);
11471 if (bnx2x_test_nvram(bp) != 0) {
11473 etest->flags |= ETH_TEST_FL_FAILED;
11475 if (bnx2x_test_intr(bp) != 0) {
11477 etest->flags |= ETH_TEST_FL_FAILED;
11480 if (bnx2x_link_test(bp) != 0) {
11482 etest->flags |= ETH_TEST_FL_FAILED;
11485 #ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
static const struct {
	long offset;
	int size;
11493 u8 string[ETH_GSTRING_LEN];
11494 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11495 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11496 { Q_STATS_OFFSET32(error_bytes_received_hi),
11497 8, "[%d]: rx_error_bytes" },
11498 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11499 8, "[%d]: rx_ucast_packets" },
11500 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11501 8, "[%d]: rx_mcast_packets" },
11502 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11503 8, "[%d]: rx_bcast_packets" },
11504 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11505 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11506 4, "[%d]: rx_phy_ip_err_discards"},
11507 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11508 4, "[%d]: rx_skb_alloc_discard" },
11509 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11511 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11512 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11513 8, "[%d]: tx_packets" }
static const struct {
	long offset;
	int size;
	u32 flags;
11520 #define STATS_FLAGS_PORT 1
11521 #define STATS_FLAGS_FUNC 2
11522 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11523 u8 string[ETH_GSTRING_LEN];
11524 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11525 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11526 8, STATS_FLAGS_BOTH, "rx_bytes" },
11527 { STATS_OFFSET32(error_bytes_received_hi),
11528 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11529 { STATS_OFFSET32(total_unicast_packets_received_hi),
11530 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11531 { STATS_OFFSET32(total_multicast_packets_received_hi),
11532 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11533 { STATS_OFFSET32(total_broadcast_packets_received_hi),
11534 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11535 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11536 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11537 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11538 8, STATS_FLAGS_PORT, "rx_align_errors" },
11539 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11540 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11541 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11542 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11543 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11544 8, STATS_FLAGS_PORT, "rx_fragments" },
11545 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11546 8, STATS_FLAGS_PORT, "rx_jabbers" },
11547 { STATS_OFFSET32(no_buff_discard_hi),
11548 8, STATS_FLAGS_BOTH, "rx_discards" },
11549 { STATS_OFFSET32(mac_filter_discard),
11550 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11551 { STATS_OFFSET32(xxoverflow_discard),
11552 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11553 { STATS_OFFSET32(brb_drop_hi),
11554 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11555 { STATS_OFFSET32(brb_truncate_hi),
11556 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11557 { STATS_OFFSET32(pause_frames_received_hi),
11558 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11559 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11560 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11561 { STATS_OFFSET32(nig_timer_max),
11562 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11563 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11564 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11565 { STATS_OFFSET32(rx_skb_alloc_failed),
11566 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11567 { STATS_OFFSET32(hw_csum_err),
11568 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11570 { STATS_OFFSET32(total_bytes_transmitted_hi),
11571 8, STATS_FLAGS_BOTH, "tx_bytes" },
11572 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11573 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11574 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11575 8, STATS_FLAGS_BOTH, "tx_packets" },
11576 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11577 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11578 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11579 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11580 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11581 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11582 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11583 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11584 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11585 8, STATS_FLAGS_PORT, "tx_deferred" },
11586 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11587 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11588 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11589 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11590 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11591 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11592 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11593 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11594 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11595 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11596 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11597 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11598 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11599 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11600 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11601 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11602 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11603 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11604 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
11605 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11606 { STATS_OFFSET32(pause_frames_sent_hi),
11607 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11610 #define IS_PORT_STAT(i) \
11611 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11612 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
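/* In E1H multi-function mode only per-function statistics are meaningful,
 * so port statistics are hidden unless the BNX2X_MSG_STATS debug level
 * is enabled.
 */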
11613 #define IS_E1HMF_MODE_STAT(bp) \
11614 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11616 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11618 struct bnx2x *bp = netdev_priv(dev);
	switch (stringset) {
11623 if (is_multi(bp)) {
11624 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11625 if (!IS_E1HMF_MODE_STAT(bp))
11626 num_stats += BNX2X_NUM_STATS;
11628 if (IS_E1HMF_MODE_STAT(bp)) {
11630 for (i = 0; i < BNX2X_NUM_STATS; i++)
11631 if (IS_FUNC_STAT(i))
11634 num_stats = BNX2X_NUM_STATS;
11639 return BNX2X_NUM_TESTS;
11646 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11648 struct bnx2x *bp = netdev_priv(dev);
11651 switch (stringset) {
11653 if (is_multi(bp)) {
11655 for_each_queue(bp, i) {
11656 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11657 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11658 bnx2x_q_stats_arr[j].string, i);
11659 k += BNX2X_NUM_Q_STATS;
11661 if (IS_E1HMF_MODE_STAT(bp))
11663 for (j = 0; j < BNX2X_NUM_STATS; j++)
11664 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11665 bnx2x_stats_arr[j].string);
11667 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11668 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11670 strcpy(buf + j*ETH_GSTRING_LEN,
11671 bnx2x_stats_arr[i].string);
11678 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11683 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11684 struct ethtool_stats *stats, u64 *buf)
11686 struct bnx2x *bp = netdev_priv(dev);
11687 u32 *hw_stats, *offset;
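	/* Counters are stored as 32-bit words; an 8-byte counter occupies
	 * two consecutive words {hi, lo} and is reassembled with
	 * HILO_U64(), e.g. HILO_U64(0x1, 0x2) == 0x100000002ULL.
	 */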
11690 if (is_multi(bp)) {
11692 for_each_queue(bp, i) {
11693 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11694 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11695 if (bnx2x_q_stats_arr[j].size == 0) {
11696 /* skip this counter */
11700 offset = (hw_stats +
11701 bnx2x_q_stats_arr[j].offset);
11702 if (bnx2x_q_stats_arr[j].size == 4) {
11703 /* 4-byte counter */
11704 buf[k + j] = (u64) *offset;
11707 /* 8-byte counter */
11708 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11710 k += BNX2X_NUM_Q_STATS;
11712 if (IS_E1HMF_MODE_STAT(bp))
11714 hw_stats = (u32 *)&bp->eth_stats;
11715 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11716 if (bnx2x_stats_arr[j].size == 0) {
11717 /* skip this counter */
11721 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11722 if (bnx2x_stats_arr[j].size == 4) {
11723 /* 4-byte counter */
11724 buf[k + j] = (u64) *offset;
11727 /* 8-byte counter */
11728 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11731 hw_stats = (u32 *)&bp->eth_stats;
11732 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11733 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11735 if (bnx2x_stats_arr[i].size == 0) {
11736 /* skip this counter */
11741 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11742 if (bnx2x_stats_arr[i].size == 4) {
11743 /* 4-byte counter */
11744 buf[j] = (u64) *offset;
11748 /* 8-byte counter */
11749 buf[j] = HILO_U64(*offset, *(offset + 1));
11755 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11757 struct bnx2x *bp = netdev_priv(dev);
11760 if (!netif_running(dev))
11769 for (i = 0; i < (data * 2); i++) {
11771 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11774 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11776 msleep_interruptible(500);
11777 if (signal_pending(current))
11781 if (bp->link_vars.link_up)
11782 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11783 bp->link_vars.line_speed);
11788 static const struct ethtool_ops bnx2x_ethtool_ops = {
11789 .get_settings = bnx2x_get_settings,
11790 .set_settings = bnx2x_set_settings,
11791 .get_drvinfo = bnx2x_get_drvinfo,
11792 .get_regs_len = bnx2x_get_regs_len,
11793 .get_regs = bnx2x_get_regs,
11794 .get_wol = bnx2x_get_wol,
11795 .set_wol = bnx2x_set_wol,
11796 .get_msglevel = bnx2x_get_msglevel,
11797 .set_msglevel = bnx2x_set_msglevel,
11798 .nway_reset = bnx2x_nway_reset,
11799 .get_link = bnx2x_get_link,
11800 .get_eeprom_len = bnx2x_get_eeprom_len,
11801 .get_eeprom = bnx2x_get_eeprom,
11802 .set_eeprom = bnx2x_set_eeprom,
11803 .get_coalesce = bnx2x_get_coalesce,
11804 .set_coalesce = bnx2x_set_coalesce,
11805 .get_ringparam = bnx2x_get_ringparam,
11806 .set_ringparam = bnx2x_set_ringparam,
11807 .get_pauseparam = bnx2x_get_pauseparam,
11808 .set_pauseparam = bnx2x_set_pauseparam,
11809 .get_rx_csum = bnx2x_get_rx_csum,
11810 .set_rx_csum = bnx2x_set_rx_csum,
11811 .get_tx_csum = ethtool_op_get_tx_csum,
11812 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11813 .set_flags = bnx2x_set_flags,
11814 .get_flags = ethtool_op_get_flags,
11815 .get_sg = ethtool_op_get_sg,
11816 .set_sg = ethtool_op_set_sg,
11817 .get_tso = ethtool_op_get_tso,
11818 .set_tso = bnx2x_set_tso,
11819 .self_test = bnx2x_self_test,
11820 .get_sset_count = bnx2x_get_sset_count,
11821 .get_strings = bnx2x_get_strings,
11822 .phys_id = bnx2x_phys_id,
11823 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11826 /* end of ethtool_ops */
11828 /****************************************************************************
11829 * General service functions
11830 ****************************************************************************/
11832 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11836 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11840 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11841 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11842 PCI_PM_CTRL_PME_STATUS));
11844 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11845 /* delay required during transition out of D3hot */
11850 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11854 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11856 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11859 /* No more memory access after this point until
	 * device is brought back to D0.
	 */
11870 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11874 /* Tell compiler that status block fields can change */
11876 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
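	/* The last entry of each RCQ page is a "next page" link rather than
	 * a real completion, so a consumer index that lands on it must be
	 * bumped past it before the comparison.
	 */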
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
11879 return (fp->rx_comp_cons != rx_cons_sb);
11883 * net_device service functions
11886 static int bnx2x_poll(struct napi_struct *napi, int budget)
11889 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11891 struct bnx2x *bp = fp->bp;
11894 #ifdef BNX2X_STOP_ON_ERROR
11895 if (unlikely(bp->panic)) {
11896 napi_complete(napi);
11901 if (bnx2x_has_tx_work(fp))
11904 if (bnx2x_has_rx_work(fp)) {
11905 work_done += bnx2x_rx_int(fp, budget - work_done);
11907 /* must not complete if we consumed full budget */
11908 if (work_done >= budget)
11912 /* Fall out from the NAPI loop if needed */
11913 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11914 bnx2x_update_fpsb_idx(fp);
11915 /* bnx2x_has_rx_work() reads the status block, thus we need
11916 * to ensure that status block indices have been actually read
11917 * (bnx2x_update_fpsb_idx) prior to this check
11918 * (bnx2x_has_rx_work) so that we won't write the "newer"
11919 * value of the status block to IGU (if there was a DMA right
11920 * after bnx2x_has_rx_work and if there is no rmb, the memory
11921 * reading (bnx2x_update_fpsb_idx) may be postponed to right
11922 * before bnx2x_ack_sb). In this case there will never be
11923 * another interrupt until there is another update of the
		 * another update of the
		 * status block, while there is still unhandled work.
		 */
		rmb();
11928 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11929 napi_complete(napi);
11930 /* Re-enable interrupts */
11931 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11932 le16_to_cpu(fp->fp_c_idx),
11934 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11935 le16_to_cpu(fp->fp_u_idx),
11936 IGU_INT_ENABLE, 1);
11946 /* we split the first BD into headers and data BDs
11947 * to ease the pain of our fellow microcode engineers
11948 * we use one mapping for both BDs
11949 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
11952 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11953 struct bnx2x_fastpath *fp,
11954 struct sw_tx_bd *tx_buf,
11955 struct eth_tx_start_bd **tx_bd, u16 hlen,
11956 u16 bd_prod, int nbd)
11958 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11959 struct eth_tx_bd *d_tx_bd;
11960 dma_addr_t mapping;
11961 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11963 /* first fix first BD */
11964 h_tx_bd->nbd = cpu_to_le16(nbd);
11965 h_tx_bd->nbytes = cpu_to_le16(hlen);
11967 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11968 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11969 h_tx_bd->addr_lo, h_tx_bd->nbd);
11971 /* now get a new data BD
11972 * (after the pbd) and fill it */
11973 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11974 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11976 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11977 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11979 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11980 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11981 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11983 /* this marks the BD as one that has no individual mapping */
11984 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11986 DP(NETIF_MSG_TX_QUEUED,
11987 "TSO split data size is %d (%x:%x)\n",
11988 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11991 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
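/* Adjust a checksum that was computed 'fix' bytes away from the transport
 * header: subtract (fix > 0) or add (fix < 0) the partial sum over the
 * skipped bytes, then byte-swap it into the order the parsing BD expects.
 */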
11996 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11999 csum = (u16) ~csum_fold(csum_sub(csum,
12000 csum_partial(t_header - fix, fix, 0)));
12003 csum = (u16) ~csum_fold(csum_add(csum,
12004 csum_partial(t_header, -fix, 0)));
12006 return swab16(csum);
12009 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12013 if (skb->ip_summed != CHECKSUM_PARTIAL)
12017 if (skb->protocol == htons(ETH_P_IPV6)) {
12019 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12020 rc |= XMIT_CSUM_TCP;
12024 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12025 rc |= XMIT_CSUM_TCP;
12029 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12030 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12032 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12033 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
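	/* the resulting xmit_type mask drives the Tx path: XMIT_CSUM_*
	 * selects how the parsing BD is filled for checksum offload,
	 * XMIT_GSO_* selects the TSO handling */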
12038 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if a packet requires linearization (i.e. it is too fragmented);
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
12042 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12047 int first_bd_sz = 0;
12049 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12050 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12052 if (xmit_type & XMIT_GSO) {
12053 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12054 /* Check if LSO packet needs to be copied:
12055 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12056 int wnd_size = MAX_FETCH_BD - 3;
12057 /* Number of windows to check */
12058 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12063 /* Headers length */
12064 hlen = (int)(skb_transport_header(skb) - skb->data) +
12067 /* Amount of data (w/o headers) on linear part of SKB*/
12068 first_bd_sz = skb_headlen(skb) - hlen;
12070 wnd_sum = first_bd_sz;
12072 /* Calculate the first sum - it's special */
12073 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12075 skb_shinfo(skb)->frags[frag_idx].size;
12077 /* If there was data on linear skb data - check it */
12078 if (first_bd_sz > 0) {
12079 if (unlikely(wnd_sum < lso_mss)) {
12084 wnd_sum -= first_bd_sz;
12087 /* Others are easier: run through the frag list and
12088 check all windows */
12089 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12091 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12093 if (unlikely(wnd_sum < lso_mss)) {
12098 skb_shinfo(skb)->frags[wnd_idx].size;
		/* in the non-LSO case, a too fragmented packet should
		   always be linearized */
12108 if (unlikely(to_copy))
12109 DP(NETIF_MSG_TX_QUEUED,
12110 "Linearization IS REQUIRED for %s packet. "
12111 "num_frags %d hlen %d first_bd_sz %d\n",
12112 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12113 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12119 /* called with netif_tx_lock
12120 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
12123 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12125 struct bnx2x *bp = netdev_priv(dev);
12126 struct bnx2x_fastpath *fp;
12127 struct netdev_queue *txq;
12128 struct sw_tx_bd *tx_buf;
12129 struct eth_tx_start_bd *tx_start_bd;
12130 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12131 struct eth_tx_parse_bd *pbd = NULL;
12132 u16 pkt_prod, bd_prod;
12134 dma_addr_t mapping;
12135 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12138 __le16 pkt_size = 0;
12140 #ifdef BNX2X_STOP_ON_ERROR
12141 if (unlikely(bp->panic))
12142 return NETDEV_TX_BUSY;
12145 fp_index = skb_get_queue_mapping(skb);
12146 txq = netdev_get_tx_queue(dev, fp_index);
12148 fp = &bp->fp[fp_index];
12150 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12151 fp->eth_q_stats.driver_xoff++;
12152 netif_tx_stop_queue(txq);
12153 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12154 return NETDEV_TX_BUSY;
12157 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12158 " gso type %x xmit_type %x\n",
12159 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12160 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12162 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12163 /* First, check if we need to linearize the skb (due to FW
12164 restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
12166 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12167 /* Statistics of linearization */
12169 if (skb_linearize(skb) != 0) {
12170 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12171 "silently dropping this SKB\n");
12172 dev_kfree_skb_any(skb);
12173 return NETDEV_TX_OK;
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
12187 pkt_prod = fp->tx_pkt_prod++;
12188 bd_prod = TX_BD(fp->tx_bd_prod);
12190 /* get a tx_buf and first BD */
12191 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12192 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12194 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12195 tx_start_bd->general_data = (UNICAST_ADDRESS <<
12196 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12198 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12200 /* remember the first BD of the packet */
12201 tx_buf->first_bd = fp->tx_bd_prod;
12205 DP(NETIF_MSG_TX_QUEUED,
12206 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12207 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12210 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12211 (bp->flags & HW_VLAN_TX_FLAG)) {
12212 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12213 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12216 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12218 /* turn on parsing and get a BD */
12219 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12220 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12222 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12224 if (xmit_type & XMIT_CSUM) {
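		/* all header lengths written into the parsing BD are in
		 * 16-bit words, hence the divisions by 2 below */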
12225 hlen = (skb_network_header(skb) - skb->data) / 2;
12227 /* for now NS flag is not used in Linux */
12229 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12230 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12232 pbd->ip_hlen = (skb_transport_header(skb) -
12233 skb_network_header(skb)) / 2;
12235 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12237 pbd->total_hlen = cpu_to_le16(hlen);
12240 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12242 if (xmit_type & XMIT_CSUM_V4)
12243 tx_start_bd->bd_flags.as_bitfield |=
12244 ETH_TX_BD_FLAGS_IP_CSUM;
12246 tx_start_bd->bd_flags.as_bitfield |=
12247 ETH_TX_BD_FLAGS_IPV6;
12249 if (xmit_type & XMIT_CSUM_TCP) {
12250 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12253 s8 fix = SKB_CS_OFF(skb); /* signed! */
12255 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12257 DP(NETIF_MSG_TX_QUEUED,
12258 "hlen %d fix %d csum before fix %x\n",
12259 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12261 /* HW bug: fixup the CSUM */
12262 pbd->tcp_pseudo_csum =
12263 bnx2x_csum_fix(skb_transport_header(skb),
12266 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12267 pbd->tcp_pseudo_csum);
12271 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12272 skb_headlen(skb), DMA_TO_DEVICE);
12274 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12275 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12276 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12277 tx_start_bd->nbd = cpu_to_le16(nbd);
12278 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12279 pkt_size = tx_start_bd->nbytes;
12281 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12282 " nbytes %d flags %x vlan %x\n",
12283 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12284 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12285 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12287 if (xmit_type & XMIT_GSO) {
12289 DP(NETIF_MSG_TX_QUEUED,
12290 "TSO packet len %d hlen %d total len %d tso size %d\n",
12291 skb->len, hlen, skb_headlen(skb),
12292 skb_shinfo(skb)->gso_size);
12294 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12296 if (unlikely(skb_headlen(skb) > hlen))
12297 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12298 hlen, bd_prod, ++nbd);
12300 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12301 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12302 pbd->tcp_flags = pbd_tcp_flags(skb);
12304 if (xmit_type & XMIT_GSO_V4) {
12305 pbd->ip_id = swab16(ip_hdr(skb)->id);
12306 pbd->tcp_pseudo_csum =
12307 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12308 ip_hdr(skb)->daddr,
12309 0, IPPROTO_TCP, 0));
12312 pbd->tcp_pseudo_csum =
12313 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12314 &ipv6_hdr(skb)->daddr,
12315 0, IPPROTO_TCP, 0));
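		/* both pseudo checksums above are computed with a zero
		 * length, so flag that for the FW: */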
12317 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12319 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12321 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12322 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12324 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12325 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12326 if (total_pkt_bd == NULL)
12327 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12329 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12331 frag->size, DMA_TO_DEVICE);
12333 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12334 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12335 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12336 le16_add_cpu(&pkt_size, frag->size);
12338 DP(NETIF_MSG_TX_QUEUED,
12339 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12340 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12341 le16_to_cpu(tx_data_bd->nbytes));
12344 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12346 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12348 /* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
12354 if (total_pkt_bd != NULL)
12355 total_pkt_bd->total_pkt_bytes = pkt_size;
12358 DP(NETIF_MSG_TX_QUEUED,
12359 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12360 " tcp_flags %x xsum %x seq %u hlen %u\n",
12361 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12362 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12363 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12365 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();
12376 fp->tx_db.data.prod += nbd;
12378 DOORBELL(bp, fp->index, fp->tx_db.raw);
12382 fp->tx_bd_prod += nbd;
12384 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12385 netif_tx_stop_queue(txq);
12387 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12388 * ordering of set_bit() in netif_tx_stop_queue() and read of
12389 * fp->bd_tx_cons */
12392 fp->eth_q_stats.driver_xoff++;
12393 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12394 netif_tx_wake_queue(txq);
12398 return NETDEV_TX_OK;
12401 /* called with rtnl_lock */
12402 static int bnx2x_open(struct net_device *dev)
12404 struct bnx2x *bp = netdev_priv(dev);
12406 netif_carrier_off(dev);
12408 bnx2x_set_power_state(bp, PCI_D0);
12410 if (!bnx2x_reset_is_done(bp)) {
		/* Reset MCP mailbox sequence if there is an ongoing
		 * recovery
		 */
		bp->fw_seq = 0;
		/* If it's the first function to load and "reset done"
		 * is still not cleared, it may mean that a previous
		 * driver instance failed in the middle of a recovery
		 * flow. We don't check the attention state here because
		 * it may have already been cleared by a "common" reset,
		 * but we shall proceed with "process kill" anyway.
		 */
12423 if ((bnx2x_get_load_cnt(bp) == 0) &&
12424 bnx2x_trylock_hw_lock(bp,
12425 HW_LOCK_RESOURCE_RESERVED_08) &&
12426 (!bnx2x_leader_reset(bp))) {
12427 DP(NETIF_MSG_HW, "Recovered in open\n");
12431 bnx2x_set_power_state(bp, PCI_D3hot);
12433 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12434 " completed yet. Try again later. If u still see this"
12435 " message after a few retries then power cycle is"
12436 " required.\n", bp->dev->name);
12442 bp->recovery_state = BNX2X_RECOVERY_DONE;
12444 return bnx2x_nic_load(bp, LOAD_OPEN);
12447 /* called with rtnl_lock */
12448 static int bnx2x_close(struct net_device *dev)
12450 struct bnx2x *bp = netdev_priv(dev);
12452 /* Unload the driver, release IRQs */
12453 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12454 if (atomic_read(&bp->pdev->enable_cnt) == 1)
12455 if (!CHIP_REV_IS_SLOW(bp))
12456 bnx2x_set_power_state(bp, PCI_D3hot);
12461 /* called with netif_tx_lock from dev_mcast.c */
12462 static void bnx2x_set_rx_mode(struct net_device *dev)
12464 struct bnx2x *bp = netdev_priv(dev);
12465 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12466 int port = BP_PORT(bp);
12468 if (bp->state != BNX2X_STATE_OPEN) {
12469 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12473 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12475 if (dev->flags & IFF_PROMISC)
12476 rx_mode = BNX2X_RX_MODE_PROMISC;
12478 else if ((dev->flags & IFF_ALLMULTI) ||
12479 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12481 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12483 else { /* some multicasts */
12484 if (CHIP_IS_E1(bp)) {
12485 int i, old, offset;
12486 struct netdev_hw_addr *ha;
12487 struct mac_configuration_cmd *config =
12488 bnx2x_sp(bp, mcast_config);
12491 netdev_for_each_mc_addr(ha, dev) {
12492 config->config_table[i].
12493 cam_entry.msb_mac_addr =
12494 swab16(*(u16 *)&ha->addr[0]);
12495 config->config_table[i].
12496 cam_entry.middle_mac_addr =
12497 swab16(*(u16 *)&ha->addr[2]);
12498 config->config_table[i].
12499 cam_entry.lsb_mac_addr =
12500 swab16(*(u16 *)&ha->addr[4]);
12501 config->config_table[i].cam_entry.flags =
12503 config->config_table[i].
12504 target_table_entry.flags = 0;
12505 config->config_table[i].target_table_entry.
12506 clients_bit_vector =
12507 cpu_to_le32(1 << BP_L_ID(bp));
12508 config->config_table[i].
12509 target_table_entry.vlan_id = 0;
12512 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12513 config->config_table[i].
12514 cam_entry.msb_mac_addr,
12515 config->config_table[i].
12516 cam_entry.middle_mac_addr,
12517 config->config_table[i].
12518 cam_entry.lsb_mac_addr);
12521 old = config->hdr.length;
12523 for (; i < old; i++) {
12524 if (CAM_IS_INVALID(config->
12525 config_table[i])) {
12526 /* already invalidated */
12530 CAM_INVALIDATE(config->
12535 if (CHIP_REV_IS_SLOW(bp))
12536 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12538 offset = BNX2X_MAX_MULTICAST*(1 + port);
12540 config->hdr.length = i;
12541 config->hdr.offset = offset;
12542 config->hdr.client_id = bp->fp->cl_id;
12543 config->hdr.reserved1 = 0;
12545 bp->set_mac_pending++;
12548 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12549 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12550 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12553 /* Accept one or more multicasts */
12554 struct netdev_hw_addr *ha;
12555 u32 mc_filter[MC_HASH_SIZE];
12556 u32 crc, bit, regidx;
12559 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
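		/* Build a 256-bit hash filter: the top byte of each
		 * address's crc32c picks one bit, spread across the
		 * MC_HASH_SIZE 32-bit registers written below.
		 */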
12561 netdev_for_each_mc_addr(ha, dev) {
12562 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12565 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12566 bit = (crc >> 24) & 0xff;
12569 mc_filter[regidx] |= (1 << bit);
12572 for (i = 0; i < MC_HASH_SIZE; i++)
12573 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12578 bp->rx_mode = rx_mode;
12579 bnx2x_set_storm_rx_mode(bp);
12582 /* called with rtnl_lock */
12583 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12585 struct sockaddr *addr = p;
12586 struct bnx2x *bp = netdev_priv(dev);
12588 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12591 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12592 if (netif_running(dev)) {
12593 if (CHIP_IS_E1(bp))
12594 bnx2x_set_eth_mac_addr_e1(bp, 1);
12596 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12602 /* called with rtnl_lock */
12603 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12604 int devad, u16 addr)
12606 struct bnx2x *bp = netdev_priv(netdev);
12609 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12611 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12612 prtad, devad, addr);
12614 if (prtad != bp->mdio.prtad) {
12615 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12616 prtad, bp->mdio.prtad);
12620 /* The HW expects different devad if CL22 is used */
12621 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12623 bnx2x_acquire_phy_lock(bp);
12624 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12625 devad, addr, &value);
12626 bnx2x_release_phy_lock(bp);
12627 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12634 /* called with rtnl_lock */
12635 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12636 u16 addr, u16 value)
12638 struct bnx2x *bp = netdev_priv(netdev);
12639 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12642 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12643 " value 0x%x\n", prtad, devad, addr, value);
12645 if (prtad != bp->mdio.prtad) {
12646 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12647 prtad, bp->mdio.prtad);
12651 /* The HW expects different devad if CL22 is used */
12652 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12654 bnx2x_acquire_phy_lock(bp);
12655 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12656 devad, addr, value);
12657 bnx2x_release_phy_lock(bp);
12661 /* called with rtnl_lock */
12662 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12664 struct bnx2x *bp = netdev_priv(dev);
12665 struct mii_ioctl_data *mdio = if_mii(ifr);
12667 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12668 mdio->phy_id, mdio->reg_num, mdio->val_in);
12670 if (!netif_running(dev))
12673 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12676 /* called with rtnl_lock */
12677 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12679 struct bnx2x *bp = netdev_priv(dev);
12682 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12683 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12687 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12688 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12691 /* This does not race with packet allocation
12692 * because the actual alloc size is
	 * only updated as part of load
	 */
12695 dev->mtu = new_mtu;
12697 if (netif_running(dev)) {
12698 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12699 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12705 static void bnx2x_tx_timeout(struct net_device *dev)
12707 struct bnx2x *bp = netdev_priv(dev);
12709 #ifdef BNX2X_STOP_ON_ERROR
12713 /* This allows the netif to be shutdown gracefully before resetting */
12714 schedule_delayed_work(&bp->reset_task, 0);
12718 /* called with rtnl_lock */
12719 static void bnx2x_vlan_rx_register(struct net_device *dev,
12720 struct vlan_group *vlgrp)
12722 struct bnx2x *bp = netdev_priv(dev);
12726 /* Set flags according to the required capabilities */
12727 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12729 if (dev->features & NETIF_F_HW_VLAN_TX)
12730 bp->flags |= HW_VLAN_TX_FLAG;
12732 if (dev->features & NETIF_F_HW_VLAN_RX)
12733 bp->flags |= HW_VLAN_RX_FLAG;
12735 if (netif_running(dev))
12736 bnx2x_set_client_config(bp);
12741 #ifdef CONFIG_NET_POLL_CONTROLLER
12742 static void poll_bnx2x(struct net_device *dev)
12744 struct bnx2x *bp = netdev_priv(dev);
12746 disable_irq(bp->pdev->irq);
12747 bnx2x_interrupt(bp->pdev->irq, dev);
12748 enable_irq(bp->pdev->irq);
12752 static const struct net_device_ops bnx2x_netdev_ops = {
12753 .ndo_open = bnx2x_open,
12754 .ndo_stop = bnx2x_close,
12755 .ndo_start_xmit = bnx2x_start_xmit,
12756 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12757 .ndo_set_mac_address = bnx2x_change_mac_addr,
12758 .ndo_validate_addr = eth_validate_addr,
12759 .ndo_do_ioctl = bnx2x_ioctl,
12760 .ndo_change_mtu = bnx2x_change_mtu,
12761 .ndo_tx_timeout = bnx2x_tx_timeout,
12763 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12765 #ifdef CONFIG_NET_POLL_CONTROLLER
12766 .ndo_poll_controller = poll_bnx2x,
12770 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12771 struct net_device *dev)
12776 SET_NETDEV_DEV(dev, &pdev->dev);
12777 bp = netdev_priv(dev);
12782 bp->func = PCI_FUNC(pdev->devfn);
12784 rc = pci_enable_device(pdev);
12786 pr_err("Cannot enable PCI device, aborting\n");
12790 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12791 pr_err("Cannot find PCI device base address, aborting\n");
12793 goto err_out_disable;
12796 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12797 pr_err("Cannot find second PCI device base address, aborting\n");
12799 goto err_out_disable;
12802 if (atomic_read(&pdev->enable_cnt) == 1) {
12803 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12805 pr_err("Cannot obtain PCI resources, aborting\n");
12806 goto err_out_disable;
12809 pci_set_master(pdev);
12810 pci_save_state(pdev);
12813 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12814 if (bp->pm_cap == 0) {
12815 pr_err("Cannot find power management capability, aborting\n");
12817 goto err_out_release;
12820 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12821 if (bp->pcie_cap == 0) {
12822 pr_err("Cannot find PCI Express capability, aborting\n");
12824 goto err_out_release;
12827 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12828 bp->flags |= USING_DAC_FLAG;
12829 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12830 pr_err("dma_set_coherent_mask failed, aborting\n");
12832 goto err_out_release;
12835 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12836 pr_err("System does not support DMA, aborting\n");
12838 goto err_out_release;
12841 dev->mem_start = pci_resource_start(pdev, 0);
12842 dev->base_addr = dev->mem_start;
12843 dev->mem_end = pci_resource_end(pdev, 0);
12845 dev->irq = pdev->irq;
12847 bp->regview = pci_ioremap_bar(pdev, 0);
12848 if (!bp->regview) {
12849 pr_err("Cannot map register space, aborting\n");
12851 goto err_out_release;
12854 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12855 min_t(u64, BNX2X_DB_SIZE,
12856 pci_resource_len(pdev, 2)));
12857 if (!bp->doorbells) {
12858 pr_err("Cannot map doorbell space, aborting\n");
12860 goto err_out_unmap;
12863 bnx2x_set_power_state(bp, PCI_D0);
12865 /* clean indirect addresses */
12866 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12867 PCICFG_VENDOR_ID_OFFSET);
12868 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12869 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12870 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12871 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12873 /* Reset the load counter */
12874 bnx2x_clear_load_cnt(bp);
12876 dev->watchdog_timeo = TX_TIMEOUT;
12878 dev->netdev_ops = &bnx2x_netdev_ops;
12879 dev->ethtool_ops = &bnx2x_ethtool_ops;
12880 dev->features |= NETIF_F_SG;
12881 dev->features |= NETIF_F_HW_CSUM;
12882 if (bp->flags & USING_DAC_FLAG)
12883 dev->features |= NETIF_F_HIGHDMA;
12884 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12885 dev->features |= NETIF_F_TSO6;
12887 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
12888 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12890 dev->vlan_features |= NETIF_F_SG;
12891 dev->vlan_features |= NETIF_F_HW_CSUM;
12892 if (bp->flags & USING_DAC_FLAG)
12893 dev->vlan_features |= NETIF_F_HIGHDMA;
12894 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12895 dev->vlan_features |= NETIF_F_TSO6;
12898 /* get_port_hwinfo() will set prtad and mmds properly */
12899 bp->mdio.prtad = MDIO_PRTAD_NONE;
12901 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12902 bp->mdio.dev = dev;
12903 bp->mdio.mdio_read = bnx2x_mdio_read;
12904 bp->mdio.mdio_write = bnx2x_mdio_write;
12910 iounmap(bp->regview);
12911 bp->regview = NULL;
12913 if (bp->doorbells) {
12914 iounmap(bp->doorbells);
12915 bp->doorbells = NULL;
12919 if (atomic_read(&pdev->enable_cnt) == 1)
12920 pci_release_regions(pdev);
12923 pci_disable_device(pdev);
12924 pci_set_drvdata(pdev, NULL);
12930 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
12931 int *width, int *speed)
12933 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
12935 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12937 /* return value of 1=2.5GHz 2=5GHz */
12938 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12941 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12943 const struct firmware *firmware = bp->firmware;
12944 struct bnx2x_fw_file_hdr *fw_hdr;
12945 struct bnx2x_fw_file_section *sections;
12946 u32 offset, len, num_ops;
12951 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12954 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12955 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12957 /* Make sure none of the offsets and sizes make us read beyond
12958 * the end of the firmware data */
12959 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12960 offset = be32_to_cpu(sections[i].offset);
12961 len = be32_to_cpu(sections[i].len);
12962 if (offset + len > firmware->size) {
12963 pr_err("Section %d length is out of bounds\n", i);
12968 /* Likewise for the init_ops offsets */
12969 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12970 ops_offsets = (u16 *)(firmware->data + offset);
12971 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12973 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12974 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12975 pr_err("Section offset %d is out of bounds\n", i);
12980 /* Check FW version */
12981 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12982 fw_ver = firmware->data + offset;
12983 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12984 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12985 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12986 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12987 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12988 fw_ver[0], fw_ver[1], fw_ver[2],
12989 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12990 BCM_5710_FW_MINOR_VERSION,
12991 BCM_5710_FW_REVISION_VERSION,
12992 BCM_5710_FW_ENGINEERING_VERSION);
12999 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13001 const __be32 *source = (const __be32 *)_source;
13002 u32 *target = (u32 *)_target;
13005 for (i = 0; i < n/4; i++)
13006 target[i] = be32_to_cpu(source[i]);
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
13013 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13015 const __be32 *source = (const __be32 *)_source;
13016 struct raw_op *target = (struct raw_op *)_target;
13019 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13020 tmp = be32_to_cpu(source[j]);
13021 target[i].op = (tmp >> 24) & 0xff;
13022 target[i].offset = tmp & 0xffffff;
13023 target[i].raw_data = be32_to_cpu(source[j+1]);
13027 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13029 const __be16 *source = (const __be16 *)_source;
13030 u16 *target = (u16 *)_target;
13033 for (i = 0; i < n/2; i++)
13034 target[i] = be16_to_cpu(source[i]);
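/* Allocate a buffer for one firmware file section and convert it from the
 * big-endian on-disk format into host order using 'func'; on allocation
 * failure, jump to the given error label.
 */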
13037 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13039 u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
13045 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13046 (u8 *)bp->arr, len); \
13049 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13051 const char *fw_file_name;
13052 struct bnx2x_fw_file_hdr *fw_hdr;
13055 if (CHIP_IS_E1(bp))
13056 fw_file_name = FW_FILE_NAME_E1;
13058 fw_file_name = FW_FILE_NAME_E1H;
13060 pr_info("Loading %s\n", fw_file_name);
13062 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13064 pr_err("Can't load firmware file %s\n", fw_file_name);
13065 goto request_firmware_exit;
13068 rc = bnx2x_check_firmware(bp);
13070 pr_err("Corrupt firmware file %s\n", fw_file_name);
13071 goto request_firmware_exit;
13074 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13076 /* Initialize the pointers to the init arrays */
13078 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13081 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13084 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
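/*
 * Note the unwind ordering above: each label frees only what was
 * successfully allocated before the failing step, so falling through
 * init_offsets_alloc_err -> init_ops_alloc_err -> request_firmware_exit
 * releases resources in exact reverse order of acquisition.
 */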
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;
	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;
	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
					bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;
	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
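/*
 * Recovery flow: the PCI error-recovery core (EEH/AER) calls
 * .error_detected first (we detach the device and request a slot reset),
 * then .slot_reset once the link has been reset (we re-enable the device
 * and restore config space), and finally .resume when traffic may flow
 * again (we recover MCP state, reload the NIC and re-attach).
 */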
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;
		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
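/*
 * Example flow (illustrative only): with max_kwqe_pending = 8,
 * cnic_spq_pending = 8 and count = 3 new completions, the code above
 * first drops cnic_spq_pending to 5 and then moves up to 3 queued KWQEs
 * from the cnic_kwq ring onto the hardware SPQ, wrapping cnic_kwq_cons
 * back to cnic_kwq when it reaches cnic_kwq_last.
 */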
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;
		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
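/*
 * Note on the two send paths: bnx2x_cnic_ctl_send() serializes against
 * (un)registration with cnic_mutex and may sleep, while the _bh variant
 * above only takes rcu_read_lock() around the ops dereference, so it is
 * safe to call from bottom-half (softirq) context.
 */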
/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;
	return bnx2x_cnic_ctl_send(bp, &ctl);
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}
	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}
	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}
	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;
	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
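/*
 * Hypothetical usage sketch (not part of this driver; the real consumer
 * is the cnic module, whose exact call sequence may differ):
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, my_cnic_ops, my_data)) {
 *		... submit iSCSI/L5 work via cp->drv_submit_kwqes_16() ...
 *		cp->drv_unregister_cnic(netdev);
 *	}
 */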
#endif /* BCM_CNIC */