1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
59 #define DRV_MODULE_VERSION "1.52.1"
60 #define DRV_MODULE_RELDATE "2009/08/12"
61 #define BNX2X_BC_VER 0x040200
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
66 #define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT (5*HZ)
72 static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108 static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116 static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
122 static struct workqueue_struct *bnx2x_wq;
124 enum bnx2x_board_type {
125 BCM57710 = 0,
126 BCM57711,
127 BCM57711E,
128 };
130 /* indexed by board_type, above */
131 static struct {
132 char *name;
133 } board_info[] __devinitdata = {
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
144 { 0 }
145 };
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
153 /* used only at init
154 * locking is done by mcp
155 */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
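/*
 * The two helpers above are the classic "window register" idiom: write the
 * target GRC address into a PCI config window (PCICFG_GRC_ADDRESS), move the
 * data through PCICFG_GRC_DATA, then park the window back on
 * PCICFG_VENDOR_ID_OFFSET so stray config cycles read something harmless.
 * A minimal standalone sketch of the same idiom follows; the offsets, the
 * cfg_space[] model and the helper names are invented for illustration and
 * are not the driver's API.
 */
#include <stdint.h>

#define CFG_WINDOW 0x78			/* hypothetical window register */
#define CFG_DATA   0x80			/* hypothetical data register */
#define CFG_PARK   0x00			/* park on the vendor-ID offset */

static uint32_t cfg_space[64];		/* stands in for PCI config space */

static void cfg_write(uint32_t off, uint32_t val) { cfg_space[off / 4] = val; }
static uint32_t cfg_read(uint32_t off) { return cfg_space[off / 4]; }

static void reg_wr_ind_sketch(uint32_t addr, uint32_t val)
{
	cfg_write(CFG_WINDOW, addr);	/* select the target register */
	cfg_write(CFG_DATA, val);	/* a real device forwards this write */
	cfg_write(CFG_WINDOW, CFG_PARK);
}

static uint32_t reg_rd_ind_sketch(uint32_t addr)
{
	uint32_t val;

	cfg_write(CFG_WINDOW, addr);
	val = cfg_read(CFG_DATA);	/* a real device returns *addr here */
	cfg_write(CFG_WINDOW, CFG_PARK);
	return val;
}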
176 static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
203 struct dmae_command dmae;
204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 memset(&dmae, 0, sizeof(struct dmae_command));
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
221 #ifdef __BIG_ENDIAN
222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
223 #else
224 DMAE_CMD_ENDIANITY_DW_SWAP |
225 #endif
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
248 mutex_lock(&bp->dmae_mutex);
252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
260 BNX2X_ERR("DMAE timeout!\n");
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
271 mutex_unlock(&bp->dmae_mutex);
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
276 struct dmae_command dmae;
277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 memset(&dmae, 0, sizeof(struct dmae_command));
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
296 #ifdef __BIG_ENDIAN
297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
298 #else
299 DMAE_CMD_ENDIANITY_DW_SWAP |
300 #endif
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
320 mutex_lock(&bp->dmae_mutex);
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
329 while (*wb_comp != DMAE_COMP_VAL) {
332 BNX2X_ERR("DMAE timeout!\n");
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
346 mutex_unlock(&bp->dmae_mutex);
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
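/*
 * bnx2x_write_dmae_phys_len() above slices a long transfer into
 * DMAE_LEN32_WR_MAX-dword commands. Watch the units: len counts 32-bit
 * words while offset is a byte offset, hence the "* 4" when advancing.
 * A standalone sketch of the same loop shape; CHUNK_DWORDS and emit()
 * are invented stand-ins, not driver symbols.
 */
#include <stdint.h>
#include <stdio.h>

#define CHUNK_DWORDS 0x400			/* plays DMAE_LEN32_WR_MAX */

/* emit one DMA command: addresses in bytes, len in dwords */
static void emit(uint64_t phys, uint32_t grc, uint32_t len)
{
	printf("dma %u dwords: 0x%llx -> 0x%x\n",
	       len, (unsigned long long)phys, grc);
}

static void write_phys_len(uint64_t phys, uint32_t grc, uint32_t len)
{
	uint32_t off = 0;			/* byte offset */

	while (len > CHUNK_DWORDS) {
		emit(phys + off, grc + off, CHUNK_DWORDS);
		off += CHUNK_DWORDS * 4;	/* dwords -> bytes */
		len -= CHUNK_DWORDS;
	}
	emit(phys + off, grc + off, len);	/* final partial chunk */
}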
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
379 REG_RD_DMAE(bp, reg, wb_data, 2);
381 return HILO_U64(wb_data[0], wb_data[1]);
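/*
 * HILO_U64() glues the two 32-bit halves read by REG_RD_DMAE() back into
 * one 64-bit value, high word first. Equivalent plain-C helper (the macro
 * body itself is assumed from context, not copied from the headers):
 */
#include <stdint.h>

static inline uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}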
385 static int bnx2x_mc_assert(struct bnx2x *bp)
389 u32 row0, row1, row2, row3;
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
506 static void bnx2x_fw_dump(struct bnx2x *bp)
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513 mark = ((mark + 0x3) & ~0x3);
514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
516 printk(KERN_ERR PFX);
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
522 printk(KERN_CONT "%s", (char *)data);
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
529 printk(KERN_CONT "%s", (char *)data);
531 printk(KERN_ERR PFX "end of fw dump\n");
534 static void bnx2x_panic_dump(struct bnx2x *bp)
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
542 BNX2X_ERR("begin crash dump -----------------\n");
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
553 for_each_rx_queue(bp, i) {
554 struct bnx2x_fastpath *fp = &bp->fp[i];
556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
559 i, fp->rx_bd_prod, fp->rx_bd_cons,
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579 fp->status_blk->c_status_block.status_block_index,
580 fp->tx_db.data.prod);
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590 for (j = start; j != end; j = RX_BD(j + 1)) {
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
600 for (j = start; j != end; j = RX_SGE(j + 1)) {
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 BNX2X_ERR("end crash dump -----------------\n");
646 static void bnx2x_int_enable(struct bnx2x *bp)
648 int port = BP_PORT(bp);
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
673 REG_WR(bp, addr, val);
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
681 REG_WR(bp, addr, val);
683 * Ensure that HC_CONFIG is written before leading/trailing edge config
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
693 /* enable nig and gpio3 attention */
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
702 /* Make sure that interrupts are indeed enabled from here on */
706 static void bnx2x_int_disable(struct bnx2x *bp)
708 int port = BP_PORT(bp);
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
720 /* flush all outstanding writes */
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
733 /* disable interrupt handling */
734 atomic_inc(&bp->intr_sem);
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
741 /* make sure all ISRs are done */
743 synchronize_irq(bp->msix_table[0].vector);
748 for_each_queue(bp, i)
749 synchronize_irq(bp->msix_table[i + offset].vector);
751 synchronize_irq(bp->pdev->irq);
753 /* make sure sp_task is not running */
754 cancel_delayed_work(&bp->sp_task);
755 flush_workqueue(bnx2x_wq);
761 * General service functions
764 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
765 u8 storm, u16 index, u8 op, u8 update)
767 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768 COMMAND_REG_INT_ACK);
769 struct igu_ack_register igu_ack;
771 igu_ack.status_block_index = index;
772 igu_ack.sb_id_and_flags =
773 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
774 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
778 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779 (*(u32 *)&igu_ack), hc_addr);
780 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
782 /* Make sure that ACK is written */
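/*
 * The ack above is one 32-bit store in which sb_id, storm, update and op
 * are packed at fixed shifts next to the status block index. A sketch of
 * that packing; the shift values and the choice of halves are illustrative
 * only and do not reproduce the real IGU register layout.
 */
#include <stdint.h>

#define SB_ID_SHIFT  0			/* illustrative shifts */
#define STORM_SHIFT  5
#define UPDATE_SHIFT 7
#define OP_SHIFT     8

static uint32_t pack_ack(uint8_t sb_id, uint8_t storm,
			 uint8_t update, uint8_t op, uint16_t index)
{
	return ((uint32_t)index << 16) |	/* index in the high half */
	       ((uint32_t)sb_id << SB_ID_SHIFT) |
	       ((uint32_t)storm << STORM_SHIFT) |
	       ((uint32_t)update << UPDATE_SHIFT) |
	       ((uint32_t)op << OP_SHIFT);
}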
787 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
789 struct host_status_block *fpsb = fp->status_blk;
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
804 static u16 bnx2x_ack_int(struct bnx2x *bp)
806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
818 * fast path service functions
821 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
823 /* Tell compiler that consumer and producer can change */
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
828 /* free skb in the packet ring at pos idx
829 * return idx of last bd freed
831 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
834 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
835 struct eth_tx_start_bd *tx_start_bd;
836 struct eth_tx_bd *tx_data_bd;
837 struct sk_buff *skb = tx_buf->skb;
838 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
841 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
845 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
846 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
850 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
853 BNX2X_ERR("BAD nbd!\n");
857 new_cons = nbd + tx_buf->first_bd;
859 /* Get the next bd */
860 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862 /* Skip a parse bd... */
864 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866 /* ...and the TSO split header bd since they have no mapping */
867 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
869 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
875 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
876 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
880 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
885 dev_kfree_skb_any(skb);
886 tx_buf->first_bd = 0;
892 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
898 barrier(); /* Tell compiler that prod and cons can change */
899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
906 #ifdef BNX2X_STOP_ON_ERROR
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
912 return (s16)(fp->bp->tx_ring_size) - used;
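/*
 * The availability math above is wraparound-safe: prod and cons are
 * free-running u16 counters and SUB_S16() -- a subtraction evaluated in
 * signed 16-bit arithmetic -- still yields the true occupancy after prod
 * wraps past 0xffff. NUM_TX_RINGS is added because the "next page" BDs can
 * never carry data. A standalone demonstration of the trick:
 */
#include <assert.h>
#include <stdint.h>

static int16_t sub_s16(uint16_t a, uint16_t b)
{
	/* relies on two's-complement wraparound, as the driver does */
	return (int16_t)((int16_t)a - (int16_t)b);
}

int main(void)
{
	assert(sub_s16(600, 500) == 100);	/* no wrap: 100 in flight */
	assert(sub_s16(50, 65486) == 100);	/* prod wrapped: still 100 */
	return 0;
}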
915 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
917 struct bnx2x *bp = fp->bp;
918 struct netdev_queue *txq;
919 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
922 #ifdef BNX2X_STOP_ON_ERROR
923 if (unlikely(bp->panic))
927 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
928 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929 sw_cons = fp->tx_pkt_cons;
931 while (sw_cons != hw_cons) {
934 pkt_cons = TX_BD(sw_cons);
936 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
938 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
939 hw_cons, sw_cons, pkt_cons);
941 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
943 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
946 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
951 fp->tx_pkt_cons = sw_cons;
952 fp->tx_bd_cons = bd_cons;
954 /* TBD need a thresh? */
955 if (unlikely(netif_tx_queue_stopped(txq))) {
957 /* Need to make the tx_bd_cons update visible to start_xmit()
958 * before checking for netif_tx_queue_stopped(). Without the
959 * memory barrier, there is a small possibility that
960 * start_xmit() will miss it and cause the queue to be stopped
961 * forever.
962 */
963 smp_mb();
965 if ((netif_tx_queue_stopped(txq)) &&
966 (bp->state == BNX2X_STATE_OPEN) &&
967 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
968 netif_tx_wake_queue(txq);
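/*
 * The stopped-queue path above is the classic double-check: update the
 * consumer, issue a full barrier, then re-test "stopped && enough room" so
 * a start_xmit() racing with this completion cannot strand the queue. A
 * simplified sketch with a GCC builtin standing in for smp_mb(); the flag,
 * counter and threshold are invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static volatile uint16_t avail;		/* free descriptors */
static volatile bool stopped;		/* models the stopped queue bit */

#define WAKE_THRESH 16			/* illustrative threshold */

static void tx_completion(uint16_t freed)
{
	avail += freed;

	__sync_synchronize();		/* plays smp_mb() */

	/*
	 * Re-check after the barrier: a racing transmit may have seen the
	 * old 'avail' and stopped the queue just before our update.
	 */
	if (stopped && avail >= WAKE_THRESH)
		stopped = false;	/* plays netif_tx_wake_queue() */
}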
973 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
976 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977 union eth_rx_cqe *rr_cqe)
979 struct bnx2x *bp = fp->bp;
980 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
984 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
985 fp->index, cid, command, bp->state,
986 rr_cqe->ramrod_cqe.ramrod_type);
991 switch (command | fp->state) {
992 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993 BNX2X_FP_STATE_OPENING):
994 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
996 fp->state = BNX2X_FP_STATE_OPEN;
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1002 fp->state = BNX2X_FP_STATE_HALTED;
1006 BNX2X_ERR("unexpected MC reply (%d) "
1007 "fp->state is %x\n", command, fp->state);
1010 mb(); /* force bnx2x_wait_ramrod() to see the change */
1014 switch (command | bp->state) {
1015 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017 bp->state = BNX2X_STATE_OPEN;
1020 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023 fp->state = BNX2X_FP_STATE_HALTED;
1026 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1027 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1028 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1032 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034 bnx2x_cnic_cfc_comp(bp, cid);
1038 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1039 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1040 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1041 bp->set_mac_pending--;
1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1047 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1048 bp->set_mac_pending--;
1053 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1054 command, bp->state);
1057 mb(); /* force bnx2x_wait_ramrod() to see the change */
1060 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1061 struct bnx2x_fastpath *fp, u16 index)
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct page *page = sw_buf->page;
1065 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1067 /* Skip "next page" elements */
1071 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1072 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1075 sw_buf->page = NULL;
1080 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1081 struct bnx2x_fastpath *fp, int last)
1085 for (i = 0; i < last; i++)
1086 bnx2x_free_rx_sge(bp, fp, i);
1089 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1090 struct bnx2x_fastpath *fp, u16 index)
1092 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1093 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1094 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1097 if (unlikely(page == NULL))
1100 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1101 PCI_DMA_FROMDEVICE);
1102 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1103 __free_pages(page, PAGES_PER_SGE_SHIFT);
1107 sw_buf->page = page;
1108 pci_unmap_addr_set(sw_buf, mapping, mapping);
1110 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1111 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1116 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1117 struct bnx2x_fastpath *fp, u16 index)
1119 struct sk_buff *skb;
1120 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1121 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1124 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1125 if (unlikely(skb == NULL))
1128 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1129 PCI_DMA_FROMDEVICE);
1130 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1136 pci_unmap_addr_set(rx_buf, mapping, mapping);
1138 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1139 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1144 /* note that we are not allocating a new skb,
1145 * we are just moving one from cons to prod
1146 * we are not creating a new mapping,
1147 * so there is no need to check for dma_mapping_error().
1149 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1150 struct sk_buff *skb, u16 cons, u16 prod)
1152 struct bnx2x *bp = fp->bp;
1153 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1154 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1155 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1156 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1158 pci_dma_sync_single_for_device(bp->pdev,
1159 pci_unmap_addr(cons_rx_buf, mapping),
1160 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1162 prod_rx_buf->skb = cons_rx_buf->skb;
1163 pci_unmap_addr_set(prod_rx_buf, mapping,
1164 pci_unmap_addr(cons_rx_buf, mapping));
1165 *prod_bd = *cons_bd;
1168 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1171 u16 last_max = fp->last_max_sge;
1173 if (SUB_S16(idx, last_max) > 0)
1174 fp->last_max_sge = idx;
1177 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1181 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1182 int idx = RX_SGE_CNT * i - 1;
1184 for (j = 0; j < 2; j++) {
1185 SGE_MASK_CLEAR_BIT(fp, idx);
1191 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1192 struct eth_fast_path_rx_cqe *fp_cqe)
1194 struct bnx2x *bp = fp->bp;
1195 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1196 le16_to_cpu(fp_cqe->len_on_bd)) >>
1198 u16 last_max, last_elem, first_elem;
1205 /* First mark all used pages */
1206 for (i = 0; i < sge_len; i++)
1207 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1209 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1210 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1212 /* Here we assume that the last SGE index is the biggest */
1213 prefetch((void *)(fp->sge_mask));
1214 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1216 last_max = RX_SGE(fp->last_max_sge);
1217 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1218 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1220 /* If ring is not full */
1221 if (last_elem + 1 != first_elem)
1224 /* Now update the prod */
1225 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1226 if (likely(fp->sge_mask[i]))
1229 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1230 delta += RX_SGE_MASK_ELEM_SZ;
1234 fp->rx_sge_prod += delta;
1235 /* clear page-end entries */
1236 bnx2x_clear_sge_mask_next_elems(fp);
1239 DP(NETIF_MSG_RX_STATUS,
1240 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1241 fp->last_max_sge, fp->rx_sge_prod);
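/*
 * The loop above advances the SGE producer only over whole 64-bit mask
 * elements that have reached zero (every page in them consumed), re-arming
 * each such element to all-ones as it goes; an element with any bit still
 * set stalls the producer. A standalone sketch of that accounting with a
 * four-element mask; the sizes are invented for illustration.
 */
#include <stdint.h>

#define ELEM_BITS 64
#define NELEMS    4
#define ALL_ONES  0xffffffffffffffffULL

static uint64_t mask[NELEMS] = { ALL_ONES, ALL_ONES, ALL_ONES, ALL_ONES };
static uint16_t sge_prod;

static void consume_page(uint16_t idx)	/* FW consumed SGE page 'idx' */
{
	mask[(idx / ELEM_BITS) % NELEMS] &= ~(1ULL << (idx % ELEM_BITS));
}

static void advance_prod(uint16_t first_elem, uint16_t last_elem)
{
	uint16_t i;

	for (i = first_elem; i != last_elem; i = (i + 1) % NELEMS) {
		if (mask[i])		/* element not fully consumed yet */
			break;
		mask[i] = ALL_ONES;	/* hand the whole element back */
		sge_prod += ELEM_BITS;
	}
}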
1244 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1246 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1247 memset(fp->sge_mask, 0xff,
1248 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1250 /* Clear the two last indices in the page to 1:
1251 these are the indices that correspond to the "next" element,
1252 hence will never be indicated and should be removed from
1253 the calculations. */
1254 bnx2x_clear_sge_mask_next_elems(fp);
1257 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1258 struct sk_buff *skb, u16 cons, u16 prod)
1260 struct bnx2x *bp = fp->bp;
1261 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1262 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1263 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1266 /* move empty skb from pool to prod and map it */
1267 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1268 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1269 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1270 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1272 /* move partial skb from cons to pool (don't unmap yet) */
1273 fp->tpa_pool[queue] = *cons_rx_buf;
1275 /* mark bin state as start - print error if current state != stop */
1276 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1277 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1279 fp->tpa_state[queue] = BNX2X_TPA_START;
1281 /* point prod_bd to new skb */
1282 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1283 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1285 #ifdef BNX2X_STOP_ON_ERROR
1286 fp->tpa_queue_used |= (1 << queue);
1287 #ifdef __powerpc64__
1288 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1289 #else
1290 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1291 #endif
1292 fp->tpa_queue_used);
1296 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1297 struct sk_buff *skb,
1298 struct eth_fast_path_rx_cqe *fp_cqe,
1301 struct sw_rx_page *rx_pg, old_rx_pg;
1302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1303 u32 i, frag_len, frag_size, pages;
1307 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1308 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1310 /* This is needed in order to enable forwarding support */
1312 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1313 max(frag_size, (u32)len_on_bd));
1315 #ifdef BNX2X_STOP_ON_ERROR
1317 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1318 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1320 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1321 fp_cqe->pkt_len, len_on_bd);
1327 /* Run through the SGL and compose the fragmented skb */
1328 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1329 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1331 /* FW gives the indices of the SGE as if the ring is an array
1332 (meaning that "next" element will consume 2 indices) */
1333 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1334 rx_pg = &fp->rx_page_ring[sge_idx];
1337 /* If we fail to allocate a substitute page, we simply stop
1338 where we are and drop the whole packet */
1339 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1340 if (unlikely(err)) {
1341 fp->eth_q_stats.rx_skb_alloc_failed++;
1345 /* Unmap the page as we are going to pass it to the stack */
1346 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1347 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1349 /* Add one frag and update the appropriate fields in the skb */
1350 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1352 skb->data_len += frag_len;
1353 skb->truesize += frag_len;
1354 skb->len += frag_len;
1356 frag_size -= frag_len;
1362 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1363 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1366 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1367 struct sk_buff *skb = rx_buf->skb;
1369 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1371 /* Unmap skb in the pool anyway, as we are going to change
1372 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1373 fails. */
1374 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1375 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1377 if (likely(new_skb)) {
1378 /* fix ip xsum and give it to the stack */
1379 /* (no need to map the new skb) */
1382 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1383 PARSING_FLAGS_VLAN);
1384 int is_not_hwaccel_vlan_cqe =
1385 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1389 prefetch(((char *)(skb)) + 128);
1391 #ifdef BNX2X_STOP_ON_ERROR
1392 if (pad + len > bp->rx_buf_size) {
1393 BNX2X_ERR("skb_put is about to fail... "
1394 "pad %d len %d rx_buf_size %d\n",
1395 pad, len, bp->rx_buf_size);
1401 skb_reserve(skb, pad);
1404 skb->protocol = eth_type_trans(skb, bp->dev);
1405 skb->ip_summed = CHECKSUM_UNNECESSARY;
1410 iph = (struct iphdr *)skb->data;
1412 /* If there is no Rx VLAN offloading -
1413 take VLAN tag into an account */
1414 if (unlikely(is_not_hwaccel_vlan_cqe))
1415 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1418 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1421 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1422 &cqe->fast_path_cqe, cqe_idx)) {
1424 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1425 (!is_not_hwaccel_vlan_cqe))
1426 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1427 le16_to_cpu(cqe->fast_path_cqe.
1431 netif_receive_skb(skb);
1433 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1434 " - dropping packet!\n");
1439 /* put new skb in bin */
1440 fp->tpa_pool[queue].skb = new_skb;
1443 /* else drop the packet and keep the buffer in the bin */
1444 DP(NETIF_MSG_RX_STATUS,
1445 "Failed to allocate new skb - dropping packet!\n");
1446 fp->eth_q_stats.rx_skb_alloc_failed++;
1449 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1452 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1453 struct bnx2x_fastpath *fp,
1454 u16 bd_prod, u16 rx_comp_prod,
1457 struct ustorm_eth_rx_producers rx_prods = {0};
1460 /* Update producers */
1461 rx_prods.bd_prod = bd_prod;
1462 rx_prods.cqe_prod = rx_comp_prod;
1463 rx_prods.sge_prod = rx_sge_prod;
1466 * Make sure that the BD and SGE data is updated before updating the
1467 * producers since FW might read the BD/SGE right after the producer
1469 * This is only applicable for weak-ordered memory model archs such
1470 * as IA-64. The following barrier is also mandatory since the FW
1471 * assumes BDs must have buffers.
1475 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1476 REG_WR(bp, BAR_USTRORM_INTMEM +
1477 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1478 ((u32 *)&rx_prods)[i]);
1480 mmiowb(); /* keep prod updates ordered */
1482 DP(NETIF_MSG_RX_STATUS,
1483 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1484 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
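/*
 * bnx2x_update_rx_prod() publishes the three producers as consecutive u32
 * stores only after a write barrier, so the FW can never see a producer
 * pointing at a BD whose buffer address it has not been given yet. Sketch
 * of the "barrier, then copy the struct out as dwords" shape; the struct
 * layout and mmio_write32() are illustrative, not the driver's types.
 */
#include <stdint.h>
#include <string.h>

struct rx_prods {			/* loosely mirrors the FW structure */
	uint16_t bd_prod;
	uint16_t cqe_prod;
	uint16_t sge_prod;
	uint16_t reserved;
};

static void mmio_write32(volatile uint32_t *addr, uint32_t val)
{
	*addr = val;			/* the driver uses REG_WR() here */
}

static void publish_prods(volatile uint32_t *dst, const struct rx_prods *p)
{
	uint32_t words[sizeof(*p) / 4];
	size_t i;

	memcpy(words, p, sizeof(words));

	__sync_synchronize();		/* plays wmb(): BD/SGE data first */

	for (i = 0; i < sizeof(*p) / 4; i++)
		mmio_write32(dst + i, words[i]);
}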
1487 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1489 struct bnx2x *bp = fp->bp;
1490 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1491 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1494 #ifdef BNX2X_STOP_ON_ERROR
1495 if (unlikely(bp->panic))
1499 /* CQ "next element" is of the size of the regular element,
1500 that's why it's ok here */
1501 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1505 bd_cons = fp->rx_bd_cons;
1506 bd_prod = fp->rx_bd_prod;
1507 bd_prod_fw = bd_prod;
1508 sw_comp_cons = fp->rx_comp_cons;
1509 sw_comp_prod = fp->rx_comp_prod;
1511 /* Memory barrier necessary as speculative reads of the rx
1512 * buffer can be ahead of the index in the status block
1516 DP(NETIF_MSG_RX_STATUS,
1517 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1518 fp->index, hw_comp_cons, sw_comp_cons);
1520 while (sw_comp_cons != hw_comp_cons) {
1521 struct sw_rx_bd *rx_buf = NULL;
1522 struct sk_buff *skb;
1523 union eth_rx_cqe *cqe;
1527 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528 bd_prod = RX_BD(bd_prod);
1529 bd_cons = RX_BD(bd_cons);
1531 /* Prefetch the page containing the BD descriptor
1532 at producer's index. It will be needed when new skb is
1533 allocated */
1534 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535 (&fp->rx_desc_ring[bd_prod])) -
1538 cqe = &fp->rx_comp_ring[comp_ring_cons];
1539 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1541 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1542 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1543 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1544 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1545 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1548 /* is this a slowpath msg? */
1549 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1550 bnx2x_sp_event(fp, cqe);
1553 /* this is an rx packet */
1555 rx_buf = &fp->rx_buf_ring[bd_cons];
1557 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558 pad = cqe->fast_path_cqe.placement_offset;
1560 /* If CQE is marked both TPA_START and TPA_END
1561 it is a non-TPA CQE */
1562 if ((!fp->disable_tpa) &&
1563 (TPA_TYPE(cqe_fp_flags) !=
1564 (TPA_TYPE_START | TPA_TYPE_END))) {
1565 u16 queue = cqe->fast_path_cqe.queue_index;
1567 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568 DP(NETIF_MSG_RX_STATUS,
1569 "calling tpa_start on queue %d\n",
1572 bnx2x_tpa_start(fp, queue, skb,
1577 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578 DP(NETIF_MSG_RX_STATUS,
1579 "calling tpa_stop on queue %d\n",
1582 if (!BNX2X_RX_SUM_FIX(cqe))
1583 BNX2X_ERR("STOP on none TCP "
1586 /* This is the size of the linear data
1587 on this skb */
1588 len = le16_to_cpu(cqe->fast_path_cqe.
1590 bnx2x_tpa_stop(bp, fp, queue, pad,
1591 len, cqe, comp_ring_cons);
1592 #ifdef BNX2X_STOP_ON_ERROR
1597 bnx2x_update_sge_prod(fp,
1598 &cqe->fast_path_cqe);
1603 pci_dma_sync_single_for_device(bp->pdev,
1604 pci_unmap_addr(rx_buf, mapping),
1605 pad + RX_COPY_THRESH,
1606 PCI_DMA_FROMDEVICE);
1608 prefetch(((char *)(skb)) + 128);
1610 /* is this an error packet? */
1611 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1612 DP(NETIF_MSG_RX_ERR,
1613 "ERROR flags %x rx packet %u\n",
1614 cqe_fp_flags, sw_comp_cons);
1615 fp->eth_q_stats.rx_err_discard_pkt++;
1619 /* Since we don't have a jumbo ring
1620 * copy small packets if mtu > 1500
1622 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623 (len <= RX_COPY_THRESH)) {
1624 struct sk_buff *new_skb;
1626 new_skb = netdev_alloc_skb(bp->dev,
1628 if (new_skb == NULL) {
1629 DP(NETIF_MSG_RX_ERR,
1630 "ERROR packet dropped "
1631 "because of alloc failure\n");
1632 fp->eth_q_stats.rx_skb_alloc_failed++;
1637 skb_copy_from_linear_data_offset(skb, pad,
1638 new_skb->data + pad, len);
1639 skb_reserve(new_skb, pad);
1640 skb_put(new_skb, len);
1642 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1647 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1648 pci_unmap_single(bp->pdev,
1649 pci_unmap_addr(rx_buf, mapping),
1651 PCI_DMA_FROMDEVICE);
1652 skb_reserve(skb, pad);
1656 DP(NETIF_MSG_RX_ERR,
1657 "ERROR packet dropped because "
1658 "of alloc failure\n");
1659 fp->eth_q_stats.rx_skb_alloc_failed++;
1661 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1665 skb->protocol = eth_type_trans(skb, bp->dev);
1667 skb->ip_summed = CHECKSUM_NONE;
1669 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670 skb->ip_summed = CHECKSUM_UNNECESSARY;
1672 fp->eth_q_stats.hw_csum_err++;
1676 skb_record_rx_queue(skb, fp->index);
1679 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1680 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681 PARSING_FLAGS_VLAN))
1682 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1686 netif_receive_skb(skb);
1692 bd_cons = NEXT_RX_IDX(bd_cons);
1693 bd_prod = NEXT_RX_IDX(bd_prod);
1694 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1697 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1700 if (rx_pkt == budget)
1704 fp->rx_bd_cons = bd_cons;
1705 fp->rx_bd_prod = bd_prod_fw;
1706 fp->rx_comp_cons = sw_comp_cons;
1707 fp->rx_comp_prod = sw_comp_prod;
1709 /* Update producers */
1710 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1713 fp->rx_pkt += rx_pkt;
1719 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1721 struct bnx2x_fastpath *fp = fp_cookie;
1722 struct bnx2x *bp = fp->bp;
1724 /* Return here if interrupt is disabled */
1725 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1730 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1731 fp->index, fp->sb_id);
1732 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1734 #ifdef BNX2X_STOP_ON_ERROR
1735 if (unlikely(bp->panic))
1738 /* Handle Rx or Tx according to MSI-X vector */
1739 if (fp->is_rx_queue) {
1740 prefetch(fp->rx_cons_sb);
1741 prefetch(&fp->status_blk->u_status_block.status_block_index);
1743 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1746 prefetch(fp->tx_cons_sb);
1747 prefetch(&fp->status_blk->c_status_block.status_block_index);
1749 bnx2x_update_fpsb_idx(fp);
1753 /* Re-enable interrupts */
1754 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1763 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1765 struct bnx2x *bp = netdev_priv(dev_instance);
1766 u16 status = bnx2x_ack_int(bp);
1770 /* Return here if interrupt is shared and it's not for us */
1771 if (unlikely(status == 0)) {
1772 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1775 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1777 /* Return here if interrupt is disabled */
1778 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1783 #ifdef BNX2X_STOP_ON_ERROR
1784 if (unlikely(bp->panic))
1788 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789 struct bnx2x_fastpath *fp = &bp->fp[i];
1791 mask = 0x2 << fp->sb_id;
1792 if (status & mask) {
1793 /* Handle Rx or Tx according to SB id */
1794 if (fp->is_rx_queue) {
1795 prefetch(fp->rx_cons_sb);
1796 prefetch(&fp->status_blk->u_status_block.
1797 status_block_index);
1799 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1802 prefetch(fp->tx_cons_sb);
1803 prefetch(&fp->status_blk->c_status_block.
1804 status_block_index);
1806 bnx2x_update_fpsb_idx(fp);
1810 /* Re-enable interrupts */
1811 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812 le16_to_cpu(fp->fp_u_idx),
1814 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815 le16_to_cpu(fp->fp_c_idx),
1823 mask = 0x2 << CNIC_SB_ID(bp);
1824 if (status & (mask | 0x1)) {
1825 struct cnic_ops *c_ops = NULL;
1828 c_ops = rcu_dereference(bp->cnic_ops);
1830 c_ops->cnic_handler(bp->cnic_data, NULL);
1837 if (unlikely(status & 0x1)) {
1838 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1846 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1852 /* end of fast path */
1854 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1859 * General service functions
1862 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1865 u32 resource_bit = (1 << resource);
1866 int func = BP_FUNC(bp);
1867 u32 hw_lock_control_reg;
1870 /* Validating that the resource is within range */
1871 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1873 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1879 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1881 hw_lock_control_reg =
1882 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1885 /* Validating that the resource is not already taken */
1886 lock_status = REG_RD(bp, hw_lock_control_reg);
1887 if (lock_status & resource_bit) {
1888 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1889 lock_status, resource_bit);
1893 /* Try for 5 seconds, polling every 5 ms */
1894 for (cnt = 0; cnt < 1000; cnt++) {
1895 /* Try to acquire the lock */
1896 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897 lock_status = REG_RD(bp, hw_lock_control_reg);
1898 if (lock_status & resource_bit)
1903 DP(NETIF_MSG_HW, "Timeout\n");
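/*
 * The acquire path above is "write the bit, read it back": a store to
 * control_reg + 4 requests the resource and the bit only reads back as set
 * if the hardware granted it; at 5 ms per attempt the 1000 iterations give
 * roughly a 5 second timeout. Standalone sketch of the polling shape; the
 * register model is invented (real hardware arbitrates the grant).
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t lock_reg;		/* models the HW lock register */

static void request_bit(uint32_t bit)	/* plays REG_WR(ctrl + 4, bit) */
{
	lock_reg |= bit;		/* a real lock may refuse this */
}

static bool acquire_hw_lock(uint32_t bit, int attempts)
{
	int cnt;

	for (cnt = 0; cnt < attempts; cnt++) {
		request_bit(bit);
		if (lock_reg & bit)	/* read back: was it granted? */
			return true;
		/* the driver sleeps ~5 ms here between attempts */
	}
	return false;			/* timeout */
}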
1907 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1910 u32 resource_bit = (1 << resource);
1911 int func = BP_FUNC(bp);
1912 u32 hw_lock_control_reg;
1914 /* Validating that the resource is within range */
1915 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1917 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1923 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1925 hw_lock_control_reg =
1926 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1929 /* Validating that the resource is currently taken */
1930 lock_status = REG_RD(bp, hw_lock_control_reg);
1931 if (!(lock_status & resource_bit)) {
1932 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1933 lock_status, resource_bit);
1937 REG_WR(bp, hw_lock_control_reg, resource_bit);
1941 /* HW Lock for shared dual port PHYs */
1942 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1944 mutex_lock(&bp->port.phy_mutex);
1946 if (bp->port.need_hw_lock)
1947 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1950 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1952 if (bp->port.need_hw_lock)
1953 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1955 mutex_unlock(&bp->port.phy_mutex);
1958 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1960 /* The GPIO should be swapped if swap register is set and active */
1961 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963 int gpio_shift = gpio_num +
1964 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965 u32 gpio_mask = (1 << gpio_shift);
1969 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1974 /* read GPIO value */
1975 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1977 /* get the requested pin value */
1978 if ((gpio_reg & gpio_mask) == gpio_mask)
1983 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
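/*
 * All three GPIO helpers start from the same swap rule: when both the
 * port-swap strap and its override are set, the effective port is the
 * caller's port XOR 1. A two-assert demonstration of the expression:
 */
#include <assert.h>

static int effective_port(int swap, int override, int port)
{
	return ((swap && override) ? 1 : 0) ^ port;
}

int main(void)
{
	assert(effective_port(1, 1, 0) == 1);	/* both set: swapped */
	assert(effective_port(1, 0, 1) == 1);	/* override clear: as-is */
	return 0;
}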
1988 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO and mask except the float bits */
2005 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2008 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010 gpio_num, gpio_shift);
2011 /* clear FLOAT and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2016 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018 gpio_num, gpio_shift);
2019 /* clear FLOAT and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2024 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026 gpio_num, gpio_shift);
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2035 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2036 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2041 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2058 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2061 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063 "output low\n", gpio_num, gpio_shift);
2064 /* clear SET and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2069 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071 "output high\n", gpio_num, gpio_shift);
2072 /* clear CLR and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2081 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2087 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2089 u32 spio_mask = (1 << spio_num);
2092 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093 (spio_num > MISC_REGISTERS_SPIO_7)) {
2094 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2098 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2099 /* read SPIO and mask except the float bits */
2100 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2103 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2104 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105 /* clear FLOAT and set CLR */
2106 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2110 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2111 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112 /* clear FLOAT and set SET */
2113 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2117 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2120 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2127 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2128 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2135 switch (bp->link_vars.ieee_fc &
2136 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2137 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2138 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2142 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2143 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2147 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2148 bp->port.advertising |= ADVERTISED_Asym_Pause;
2152 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2158 static void bnx2x_link_report(struct bnx2x *bp)
2160 if (bp->state == BNX2X_STATE_DISABLED) {
2161 netif_carrier_off(bp->dev);
2162 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2166 if (bp->link_vars.link_up) {
2167 if (bp->state == BNX2X_STATE_OPEN)
2168 netif_carrier_on(bp->dev);
2169 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2171 printk("%d Mbps ", bp->link_vars.line_speed);
2173 if (bp->link_vars.duplex == DUPLEX_FULL)
2174 printk("full duplex");
2176 printk("half duplex");
2178 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2180 printk(", receive ");
2181 if (bp->link_vars.flow_ctrl &
2183 printk("& transmit ");
2185 printk(", transmit ");
2187 printk("flow control ON");
2191 } else { /* link_down */
2192 netif_carrier_off(bp->dev);
2193 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2197 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2199 if (!BP_NOMCP(bp)) {
2202 /* Initialize link parameters structure variables */
2203 /* It is recommended to turn off RX FC for jumbo frames
2204 for better performance */
2205 if (bp->dev->mtu > 5000)
2206 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2208 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2210 bnx2x_acquire_phy_lock(bp);
2212 if (load_mode == LOAD_DIAG)
2213 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2215 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2217 bnx2x_release_phy_lock(bp);
2219 bnx2x_calc_fc_adv(bp);
2221 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223 bnx2x_link_report(bp);
2228 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2232 static void bnx2x_link_set(struct bnx2x *bp)
2234 if (!BP_NOMCP(bp)) {
2235 bnx2x_acquire_phy_lock(bp);
2236 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2237 bnx2x_release_phy_lock(bp);
2239 bnx2x_calc_fc_adv(bp);
2241 BNX2X_ERR("Bootcode is missing - can not set link\n");
2244 static void bnx2x__link_reset(struct bnx2x *bp)
2246 if (!BP_NOMCP(bp)) {
2247 bnx2x_acquire_phy_lock(bp);
2248 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2249 bnx2x_release_phy_lock(bp);
2251 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2254 static u8 bnx2x_link_test(struct bnx2x *bp)
2258 bnx2x_acquire_phy_lock(bp);
2259 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2260 bnx2x_release_phy_lock(bp);
2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2267 u32 r_param = bp->link_vars.line_speed / 8;
2268 u32 fair_periodic_timeout_usec;
2271 memset(&(bp->cmng.rs_vars), 0,
2272 sizeof(struct rate_shaping_vars_per_port));
2273 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2275 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2278 /* this is the threshold below which no timer arming will occur
2279 the 1.25 coefficient makes the threshold a little bigger
2280 than the real time, to compensate for timer inaccuracy */
2281 bp->cmng.rs_vars.rs_threshold =
2282 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2284 /* resolution of fairness timer */
2285 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2289 /* this is the threshold below which we won't arm the timer anymore */
2290 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2292 /* we multiply by 1e3/8 to get bytes/msec.
2293 We don't want the credits to exceed
2294 t_fair*FAIR_MEM (the algorithm resolution) */
2295 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296 /* since each tick is 4 usec */
2297 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
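/* Worked example of the arithmetic above (assuming a 10G link):
 * r_param = 10000 / 8 = 1250 bytes/usec, so rs_threshold =
 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4, i.e. exactly 1.25 times
 * the byte count of one rate-shaping period - the "little bigger
 * than the real time" margin described above. The two /4 divisions
 * convert usec values into 4-usec SDM ticks.
 */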
2300 /* Calculates the sum of vn_min_rates.
2301 It's needed for further normalizing of the min_rates.
2303 sum of vn_min_rates.
2305 0 - if all the min_rates are 0.
2306 In the latter case the fairness algorithm should be deactivated.
2307 If not all min_rates are zero then those that are zeroes will be set to 1.
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2312 int port = BP_PORT(bp);
2315 bp->vn_weight_sum = 0;
2316 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317 int func = 2*vn + port;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322 /* Skip hidden vns */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2326 /* If min rate is zero - set it to 1 */
2328 vn_min_rate = DEF_MIN_RATE;
2332 bp->vn_weight_sum += vn_min_rate;
2335 /* ... only if all min rates are zeros - disable fairness */
2337 bp->vn_weight_sum = 0;
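/* Worked example (hypothetical config): with three visible vns whose
 * MIN_BW fields scale to 0, 2500 and 7500, the zero entry is bumped
 * to DEF_MIN_RATE before summing, so vn_weight_sum =
 * DEF_MIN_RATE + 2500 + 7500 and each vn later gets the fair share
 * vn_min_rate / vn_weight_sum. Only when every configured min rate
 * is zero is vn_weight_sum forced back to 0, disabling fairness.
 */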
2340 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2342 struct rate_shaping_vars_per_vn m_rs_vn;
2343 struct fairness_vars_per_vn m_fair_vn;
2344 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2345 u16 vn_min_rate, vn_max_rate;
2348 /* If function is hidden - set min and max to zeroes */
2349 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2354 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2355 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2356 /* If fairness is enabled (not all min rates are zeroes) and
2357 if current min rate is zero - set it to 1.
2358 This is a requirement of the algorithm. */
2359 if (bp->vn_weight_sum && (vn_min_rate == 0))
2360 vn_min_rate = DEF_MIN_RATE;
2361 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2362 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2366 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2367 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2369 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2370 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2372 /* global vn counter - maximal Mbps for this vn */
2373 m_rs_vn.vn_counter.rate = vn_max_rate;
2375 /* quota - number of bytes transmitted in this period */
2376 m_rs_vn.vn_counter.quota =
2377 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2379 if (bp->vn_weight_sum) {
2380 /* credit for each period of the fairness algorithm:
2381 number of bytes in T_FAIR (the vns share the port rate).
2382 vn_weight_sum should not be larger than 10000, thus
2383 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2385 m_fair_vn.vn_credit_delta =
2386 max((u32)(vn_min_rate * (T_FAIR_COEF /
2387 (8 * bp->vn_weight_sum))),
2388 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2389 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2390 m_fair_vn.vn_credit_delta);
2393 /* Store it to internal memory */
2394 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2395 REG_WR(bp, BAR_XSTRORM_INTMEM +
2396 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2397 ((u32 *)(&m_rs_vn))[i]);
2399 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2400 REG_WR(bp, BAR_XSTRORM_INTMEM +
2401 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2402 ((u32 *)(&m_fair_vn))[i]);
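/* The two loops above copy the structures into XSTORM internal memory
 * one 32-bit word at a time: REG_WR moves a single dword per call, so
 * sizeof(...)/4 gives the dword count and each iteration advances the
 * destination offset by i * 4 bytes.
 */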
2406 /* This function is called upon link interrupt */
2407 static void bnx2x_link_attn(struct bnx2x *bp)
2409 /* Make sure that we are synced with the current statistics */
2410 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2412 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2414 if (bp->link_vars.link_up) {
2416 /* dropless flow control */
2417 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2418 int port = BP_PORT(bp);
2419 u32 pause_enabled = 0;
2421 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2424 REG_WR(bp, BAR_USTRORM_INTMEM +
2425 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2429 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2430 struct host_port_stats *pstats;
2432 pstats = bnx2x_sp(bp, port_stats);
2433 /* reset old bmac stats */
2434 memset(&(pstats->mac_stx[0]), 0,
2435 sizeof(struct mac_stx));
2437 if ((bp->state == BNX2X_STATE_OPEN) ||
2438 (bp->state == BNX2X_STATE_DISABLED))
2439 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2442 /* indicate link status */
2443 bnx2x_link_report(bp);
2446 int port = BP_PORT(bp);
2450 /* Set the attention towards other drivers on the same port */
2451 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2452 if (vn == BP_E1HVN(bp))
2455 func = ((vn << 1) | port);
2456 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2457 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2460 if (bp->link_vars.link_up) {
2463 /* Init rate shaping and fairness contexts */
2464 bnx2x_init_port_minmax(bp);
2466 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2467 bnx2x_init_vn_minmax(bp, 2*vn + port);
2469 /* Store it to internal memory */
2471 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2472 REG_WR(bp, BAR_XSTRORM_INTMEM +
2473 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2474 ((u32 *)(&bp->cmng))[i]);
2479 static void bnx2x__link_status_update(struct bnx2x *bp)
2481 int func = BP_FUNC(bp);
2483 if (bp->state != BNX2X_STATE_OPEN)
2486 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2488 if (bp->link_vars.link_up)
2489 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2493 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2494 bnx2x_calc_vn_weight_sum(bp);
2496 /* indicate link status */
2497 bnx2x_link_report(bp);
2500 static void bnx2x_pmf_update(struct bnx2x *bp)
2502 int port = BP_PORT(bp);
2506 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2508 /* enable nig attention */
2509 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2510 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2511 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2513 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2521 * General service functions
2524 /* send the MCP a request, block until there is a reply */
2525 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2527 int func = BP_FUNC(bp);
2528 u32 seq = ++bp->fw_seq;
2531 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2533 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2534 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2537 /* let the FW do its magic ... */
2540 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2542 /* Give the FW up to 2 seconds (200*10ms) */
2543 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2545 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2546 cnt*delay, rc, seq);
2548 /* is this a reply to our command? */
2549 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2550 rc &= FW_MSG_CODE_MASK;
2553 BNX2X_ERR("FW failed to respond!\n");
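/* The mailbox handshake above is a simple sequence-number protocol:
 * the driver writes (command | seq) to drv_mb_header and polls
 * fw_mb_header until the FW echoes the same seq in
 * FW_MSG_SEQ_NUMBER_MASK. E.g. (hypothetical values) with seq 0x0005
 * a reply of 0x10000005 passes the masked compare and
 * rc & FW_MSG_CODE_MASK is returned, while a stale reply carrying
 * seq 0x0004 just keeps the loop polling until the timeout.
 */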
2561 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2562 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2563 static void bnx2x_set_rx_mode(struct net_device *dev);
2565 static void bnx2x_e1h_disable(struct bnx2x *bp)
2567 int port = BP_PORT(bp);
2570 bp->rx_mode = BNX2X_RX_MODE_NONE;
2571 bnx2x_set_storm_rx_mode(bp);
2573 netif_tx_disable(bp->dev);
2574 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2576 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2578 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2580 for (i = 0; i < MC_HASH_SIZE; i++)
2581 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2583 netif_carrier_off(bp->dev);
2586 static void bnx2x_e1h_enable(struct bnx2x *bp)
2588 int port = BP_PORT(bp);
2590 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2594 /* Tx queues should only be re-enabled */
2595 netif_tx_wake_all_queues(bp->dev);
2597 /* Initialize the receive filter. */
2598 bnx2x_set_rx_mode(bp->dev);
2601 static void bnx2x_update_min_max(struct bnx2x *bp)
2603 int port = BP_PORT(bp);
2606 /* Init rate shaping and fairness contexts */
2607 bnx2x_init_port_minmax(bp);
2609 bnx2x_calc_vn_weight_sum(bp);
2611 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2612 bnx2x_init_vn_minmax(bp, 2*vn + port);
2617 /* Set the attention towards other drivers on the same port */
2618 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2619 if (vn == BP_E1HVN(bp))
2622 func = ((vn << 1) | port);
2623 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2624 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2627 /* Store it to internal memory */
2628 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2629 REG_WR(bp, BAR_XSTRORM_INTMEM +
2630 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2631 ((u32 *)(&bp->cmng))[i]);
2635 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2637 int func = BP_FUNC(bp);
2639 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2640 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2642 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2644 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2645 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2646 bp->state = BNX2X_STATE_DISABLED;
2648 bnx2x_e1h_disable(bp);
2650 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2651 bp->state = BNX2X_STATE_OPEN;
2653 bnx2x_e1h_enable(bp);
2655 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2657 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2659 bnx2x_update_min_max(bp);
2660 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2663 /* Report results to MCP */
2665 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2667 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
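/* Each handled DCC bit is cleared from dcc_event as it is processed,
 * so any bits still set at this point were unrecognized and
 * DCC_FAILURE is reported to the MCP; a fully consumed event reports
 * DCC_OK.
 */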
2670 /* must be called under the spq lock */
2671 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2673 struct eth_spe *next_spe = bp->spq_prod_bd;
2675 if (bp->spq_prod_bd == bp->spq_last_bd) {
2676 bp->spq_prod_bd = bp->spq;
2677 bp->spq_prod_idx = 0;
2678 DP(NETIF_MSG_TIMER, "end of spq\n");
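/* Ring discipline sketch: spq_prod_bd walks the SPQ linearly and,
 * once it reaches spq_last_bd, wraps back to the start of bp->spq
 * with spq_prod_idx reset to 0, so the producer never runs past the
 * end of the DMA-mapped ring.
 */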
2686 /* must be called under the spq lock */
2687 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2689 int func = BP_FUNC(bp);
2691 /* Make sure that BD data is updated before writing the producer */
2694 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2699 /* the slow path queue is odd since completions arrive on the fastpath ring */
2700 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2701 u32 data_hi, u32 data_lo, int common)
2703 struct eth_spe *spe;
2705 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2706 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2707 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2708 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2709 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2711 #ifdef BNX2X_STOP_ON_ERROR
2712 if (unlikely(bp->panic))
2716 spin_lock_bh(&bp->spq_lock);
2718 if (!bp->spq_left) {
2719 BNX2X_ERR("BUG! SPQ ring full!\n");
2720 spin_unlock_bh(&bp->spq_lock);
2725 spe = bnx2x_sp_get_next(bp);
2727 /* CID needs port number to be encoded in it */
2728 spe->hdr.conn_and_cmd_data =
2729 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2731 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2734 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2736 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2737 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2741 bnx2x_sp_prod_update(bp);
2742 spin_unlock_bh(&bp->spq_lock);
2746 /* acquire split MCP access lock register */
2747 static int bnx2x_acquire_alr(struct bnx2x *bp)
2754 for (j = 0; j < i*10; j++) {
2756 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2757 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2758 if (val & (1L << 31))
2763 if (!(val & (1L << 31))) {
2764 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2771 /* release split MCP access lock register */
2772 static void bnx2x_release_alr(struct bnx2x *bp)
2776 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2779 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2781 struct host_def_status_block *def_sb = bp->def_status_blk;
2784 barrier(); /* status block is written to by the chip */
2785 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2786 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2789 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2790 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2793 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2794 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2797 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2798 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2801 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2802 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
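/* Each compare above detects a chip-side update: the storms DMA new
 * index values into the default status block in host memory, so a
 * mismatch with the driver's cached def_*_idx means that block has
 * pending work. The returned bitmask (bit 0 = attention bits changed)
 * tells the slowpath task whether bnx2x_attn_int() must run.
 */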
2809 * slow path service functions
2812 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2814 int port = BP_PORT(bp);
2815 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2816 COMMAND_REG_ATTN_BITS_SET);
2817 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2818 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2819 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2820 NIG_REG_MASK_INTERRUPT_PORT0;
2824 if (bp->attn_state & asserted)
2825 BNX2X_ERR("IGU ERROR\n");
2827 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2828 aeu_mask = REG_RD(bp, aeu_addr);
2830 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2831 aeu_mask, asserted);
2832 aeu_mask &= ~(asserted & 0xff);
2833 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2835 REG_WR(bp, aeu_addr, aeu_mask);
2836 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2838 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2839 bp->attn_state |= asserted;
2840 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2842 if (asserted & ATTN_HARD_WIRED_MASK) {
2843 if (asserted & ATTN_NIG_FOR_FUNC) {
2845 bnx2x_acquire_phy_lock(bp);
2847 /* save nig interrupt mask */
2848 nig_mask = REG_RD(bp, nig_int_mask_addr);
2849 REG_WR(bp, nig_int_mask_addr, 0);
2851 bnx2x_link_attn(bp);
2853 /* handle unicore attn? */
2855 if (asserted & ATTN_SW_TIMER_4_FUNC)
2856 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2858 if (asserted & GPIO_2_FUNC)
2859 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2861 if (asserted & GPIO_3_FUNC)
2862 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2864 if (asserted & GPIO_4_FUNC)
2865 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2868 if (asserted & ATTN_GENERAL_ATTN_1) {
2869 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2870 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2872 if (asserted & ATTN_GENERAL_ATTN_2) {
2873 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2874 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2876 if (asserted & ATTN_GENERAL_ATTN_3) {
2877 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2878 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2881 if (asserted & ATTN_GENERAL_ATTN_4) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2885 if (asserted & ATTN_GENERAL_ATTN_5) {
2886 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2887 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2889 if (asserted & ATTN_GENERAL_ATTN_6) {
2890 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2891 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2895 } /* if hardwired */
2897 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2899 REG_WR(bp, hc_addr, asserted);
2901 /* now set back the mask */
2902 if (asserted & ATTN_NIG_FOR_FUNC) {
2903 REG_WR(bp, nig_int_mask_addr, nig_mask);
2904 bnx2x_release_phy_lock(bp);
2908 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2910 int port = BP_PORT(bp);
2912 /* mark the failure */
2913 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2914 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2915 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2916 bp->link_params.ext_phy_config);
2918 /* log the failure */
2919 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2920 " the driver to shutdown the card to prevent permanent"
2921 " damage. Please contact Dell Support for assistance\n",
2925 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2927 int port = BP_PORT(bp);
2929 u32 val, swap_val, swap_override;
2931 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2932 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2934 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2936 val = REG_RD(bp, reg_offset);
2937 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2938 REG_WR(bp, reg_offset, val);
2940 BNX2X_ERR("SPIO5 hw attention\n");
2942 /* Fan failure attention */
2943 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2945 /* Low power mode is controlled by GPIO 2 */
2946 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2947 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2948 /* The PHY reset is controlled by GPIO 1 */
2949 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2950 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2953 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2954 /* The PHY reset is controlled by GPIO 1 */
2955 /* fake the port number to cancel the swap done in
2957 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2958 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2959 port = (swap_val && swap_override) ^ 1;
2960 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2961 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2967 bnx2x_fan_failure(bp);
2970 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2971 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2972 bnx2x_acquire_phy_lock(bp);
2973 bnx2x_handle_module_detect_int(&bp->link_params);
2974 bnx2x_release_phy_lock(bp);
2977 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2979 val = REG_RD(bp, reg_offset);
2980 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2981 REG_WR(bp, reg_offset, val);
2983 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2984 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2989 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2993 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2995 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2996 BNX2X_ERR("DB hw attention 0x%x\n", val);
2997 /* DORQ discard attention */
2999 BNX2X_ERR("FATAL error from DORQ\n");
3002 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3004 int port = BP_PORT(bp);
3007 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3008 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3010 val = REG_RD(bp, reg_offset);
3011 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3012 REG_WR(bp, reg_offset, val);
3014 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3015 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3020 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3024 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3026 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3027 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3028 /* CFC error attention */
3030 BNX2X_ERR("FATAL error from CFC\n");
3033 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3035 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3036 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3037 /* RQ_USDMDP_FIFO_OVERFLOW */
3039 BNX2X_ERR("FATAL error from PXP\n");
3042 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3044 int port = BP_PORT(bp);
3047 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3048 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3050 val = REG_RD(bp, reg_offset);
3051 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3052 REG_WR(bp, reg_offset, val);
3054 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3055 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3060 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3064 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3066 if (attn & BNX2X_PMF_LINK_ASSERT) {
3067 int func = BP_FUNC(bp);
3069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3070 val = SHMEM_RD(bp, func_mb[func].drv_status);
3071 if (val & DRV_STATUS_DCC_EVENT_MASK)
3073 (val & DRV_STATUS_DCC_EVENT_MASK));
3074 bnx2x__link_status_update(bp);
3075 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3076 bnx2x_pmf_update(bp);
3078 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3080 BNX2X_ERR("MC assert!\n");
3081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3083 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3087 } else if (attn & BNX2X_MCP_ASSERT) {
3089 BNX2X_ERR("MCP assert!\n");
3090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3094 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3097 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3098 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3099 if (attn & BNX2X_GRC_TIMEOUT) {
3100 val = CHIP_IS_E1H(bp) ?
3101 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3102 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3104 if (attn & BNX2X_GRC_RSV) {
3105 val = CHIP_IS_E1H(bp) ?
3106 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3107 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3109 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3113 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3115 struct attn_route attn;
3116 struct attn_route group_mask;
3117 int port = BP_PORT(bp);
3123 /* need to take HW lock because MCP or other port might also
3124 try to handle this event */
3125 bnx2x_acquire_alr(bp);
3127 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3128 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3129 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3130 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3131 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3132 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3134 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3135 if (deasserted & (1 << index)) {
3136 group_mask = bp->attn_group[index];
3138 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3139 index, group_mask.sig[0], group_mask.sig[1],
3140 group_mask.sig[2], group_mask.sig[3]);
3142 bnx2x_attn_int_deasserted3(bp,
3143 attn.sig[3] & group_mask.sig[3]);
3144 bnx2x_attn_int_deasserted1(bp,
3145 attn.sig[1] & group_mask.sig[1]);
3146 bnx2x_attn_int_deasserted2(bp,
3147 attn.sig[2] & group_mask.sig[2]);
3148 bnx2x_attn_int_deasserted0(bp,
3149 attn.sig[0] & group_mask.sig[0]);
3151 if ((attn.sig[0] & group_mask.sig[0] &
3152 HW_PRTY_ASSERT_SET_0) ||
3153 (attn.sig[1] & group_mask.sig[1] &
3154 HW_PRTY_ASSERT_SET_1) ||
3155 (attn.sig[2] & group_mask.sig[2] &
3156 HW_PRTY_ASSERT_SET_2))
3157 BNX2X_ERR("FATAL HW block parity attention\n");
3161 bnx2x_release_alr(bp);
3163 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3166 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3168 REG_WR(bp, reg_addr, val);
3170 if (~bp->attn_state & deasserted)
3171 BNX2X_ERR("IGU ERROR\n");
3173 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3174 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3177 aeu_mask = REG_RD(bp, reg_addr);
3179 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3180 aeu_mask, deasserted);
3181 aeu_mask |= (deasserted & 0xff);
3182 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3184 REG_WR(bp, reg_addr, aeu_mask);
3185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3187 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3188 bp->attn_state &= ~deasserted;
3189 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3192 static void bnx2x_attn_int(struct bnx2x *bp)
3194 /* read local copy of bits */
3195 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3197 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3199 u32 attn_state = bp->attn_state;
3201 /* look for changed bits */
3202 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3203 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3206 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3207 attn_bits, attn_ack, asserted, deasserted);
3209 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3210 BNX2X_ERR("BAD attention state\n");
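/* Derivation of the two masks above: a bit set in attn_bits but not
 * in attn_ack or attn_state is newly asserted; a bit cleared in
 * attn_bits but still set in attn_ack and attn_state is newly
 * deasserted. A bit where attn_bits == attn_ack yet
 * attn_bits != attn_state can only mean the bookkeeping is out of
 * sync, hence the BAD-state check.
 */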
3212 /* handle bits that were raised */
3214 bnx2x_attn_int_asserted(bp, asserted);
3217 bnx2x_attn_int_deasserted(bp, deasserted);
3220 static void bnx2x_sp_task(struct work_struct *work)
3222 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3226 /* Return here if interrupt is disabled */
3227 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3228 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3232 status = bnx2x_update_dsb_idx(bp);
3233 /* if (status == 0) */
3234 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3236 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3242 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3244 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3246 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3248 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3250 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3255 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3257 struct net_device *dev = dev_instance;
3258 struct bnx2x *bp = netdev_priv(dev);
3260 /* Return here if interrupt is disabled */
3261 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3262 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3266 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3268 #ifdef BNX2X_STOP_ON_ERROR
3269 if (unlikely(bp->panic))
3275 struct cnic_ops *c_ops;
3278 c_ops = rcu_dereference(bp->cnic_ops);
3280 c_ops->cnic_handler(bp->cnic_data, NULL);
3284 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3289 /* end of slow path */
3293 /****************************************************************************
3295 ****************************************************************************/
3297 /* sum[hi:lo] += add[hi:lo] */
3298 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3301 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3304 /* difference = minuend - subtrahend */
3305 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3307 if (m_lo < s_lo) { \
3309 d_hi = m_hi - s_hi; \
3311 /* we can 'loan' 1 */ \
3313 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3315 /* m_hi <= s_hi */ \
3320 /* m_lo >= s_lo */ \
3321 if (m_hi < s_hi) { \
3325 /* m_hi >= s_hi */ \
3326 d_hi = m_hi - s_hi; \
3327 d_lo = m_lo - s_lo; \
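/* Worked borrow example for DIFF_64: m = 0x00000001:00000000 minus
 * s = 0x00000000:00000001 takes the m_lo < s_lo branch; d_hi starts
 * as 1, is decremented to 'loan' the borrow, and d_lo becomes
 * 0 + (UINT_MAX - 1) + 1 = 0xffffffff, i.e. the correct 2^32 - 1.
 * Differences that would go negative clamp to 0:0 instead.
 */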
3332 #define UPDATE_STAT64(s, t) \
3334 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3335 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3336 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3337 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3338 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3339 pstats->mac_stx[1].t##_lo, diff.lo); \
3342 #define UPDATE_STAT64_NIG(s, t) \
3344 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3345 diff.lo, new->s##_lo, old->s##_lo); \
3346 ADD_64(estats->t##_hi, diff.hi, \
3347 estats->t##_lo, diff.lo); \
3350 /* sum[hi:lo] += add */
3351 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3354 s_hi += (s_lo < a) ? 1 : 0; \
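/* Carry example for ADD_EXTEND_64: if s_lo is 0xffffffff and a is 2,
 * s_lo wraps to 1; since 1 < a the compare detects the wrap and
 * increments s_hi, extending a 32-bit HW counter to 64 bits in SW.
 */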
3357 #define UPDATE_EXTEND_STAT(s) \
3359 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3360 pstats->mac_stx[1].s##_lo, \
3364 #define UPDATE_EXTEND_TSTAT(s, t) \
3366 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3367 old_tclient->s = tclient->s; \
3368 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3371 #define UPDATE_EXTEND_USTAT(s, t) \
3373 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3374 old_uclient->s = uclient->s; \
3375 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3378 #define UPDATE_EXTEND_XSTAT(s, t) \
3380 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3381 old_xclient->s = xclient->s; \
3382 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385 /* minuend -= subtrahend */
3386 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3388 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3391 /* minuend[hi:lo] -= subtrahend */
3392 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3394 SUB_64(m_hi, 0, m_lo, s); \
3397 #define SUB_EXTEND_USTAT(s, t) \
3399 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3400 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3404 * General service functions
3407 static inline long bnx2x_hilo(u32 *hiref)
3409 u32 lo = *(hiref + 1);
3410 #if (BITS_PER_LONG == 64)
3413 return HILO_U64(hi, lo);
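/* bnx2x_hilo() reads a {hi, lo} pair of adjacent u32s; on 64-bit
 * builds HILO_U64 composes ((u64)hi << 32) | lo, while on 32-bit
 * builds a long can only carry the low word.
 */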
3420 * Init service functions
3423 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3425 if (!bp->stats_pending) {
3426 struct eth_query_ramrod_data ramrod_data = {0};
3429 ramrod_data.drv_counter = bp->stats_counter++;
3430 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3431 for_each_queue(bp, i)
3432 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3434 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3435 ((u32 *)&ramrod_data)[1],
3436 ((u32 *)&ramrod_data)[0], 0);
3438 /* stats ramrod has its own slot on the spq */
3440 bp->stats_pending = 1;
3445 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3447 struct dmae_command *dmae = &bp->stats_dmae;
3448 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3450 *stats_comp = DMAE_COMP_VAL;
3451 if (CHIP_REV_IS_SLOW(bp))
3455 if (bp->executer_idx) {
3456 int loader_idx = PMF_DMAE_C(bp);
3458 memset(dmae, 0, sizeof(struct dmae_command));
3460 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3461 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3462 DMAE_CMD_DST_RESET |
3464 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3466 DMAE_CMD_ENDIANITY_DW_SWAP |
3468 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3470 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3471 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3472 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3473 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3474 sizeof(struct dmae_command) *
3475 (loader_idx + 1)) >> 2;
3476 dmae->dst_addr_hi = 0;
3477 dmae->len = sizeof(struct dmae_command) >> 2;
3480 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3481 dmae->comp_addr_hi = 0;
3485 bnx2x_post_dmae(bp, dmae, loader_idx);
3487 } else if (bp->func_stx) {
3489 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3493 static int bnx2x_stats_comp(struct bnx2x *bp)
3495 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3499 while (*stats_comp != DMAE_COMP_VAL) {
3501 BNX2X_ERR("timeout waiting for stats finished\n");
3511 * Statistics service functions
3514 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3516 struct dmae_command *dmae;
3518 int loader_idx = PMF_DMAE_C(bp);
3519 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3522 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3523 BNX2X_ERR("BUG!\n");
3527 bp->executer_idx = 0;
3529 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3531 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3533 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3535 DMAE_CMD_ENDIANITY_DW_SWAP |
3537 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3538 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3540 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3541 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3542 dmae->src_addr_lo = bp->port.port_stx >> 2;
3543 dmae->src_addr_hi = 0;
3544 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3545 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3546 dmae->len = DMAE_LEN32_RD_MAX;
3547 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3548 dmae->comp_addr_hi = 0;
3551 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3552 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3553 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3554 dmae->src_addr_hi = 0;
3555 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3556 DMAE_LEN32_RD_MAX * 4);
3557 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3558 DMAE_LEN32_RD_MAX * 4);
3559 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3560 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3561 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3562 dmae->comp_val = DMAE_COMP_VAL;
3565 bnx2x_hw_stats_post(bp);
3566 bnx2x_stats_comp(bp);
3569 static void bnx2x_port_stats_init(struct bnx2x *bp)
3571 struct dmae_command *dmae;
3572 int port = BP_PORT(bp);
3573 int vn = BP_E1HVN(bp);
3575 int loader_idx = PMF_DMAE_C(bp);
3577 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3580 if (!bp->link_vars.link_up || !bp->port.pmf) {
3581 BNX2X_ERR("BUG!\n");
3585 bp->executer_idx = 0;
3588 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3589 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3590 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3592 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3594 DMAE_CMD_ENDIANITY_DW_SWAP |
3596 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3597 (vn << DMAE_CMD_E1HVN_SHIFT));
3599 if (bp->port.port_stx) {
3601 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3602 dmae->opcode = opcode;
3603 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3604 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3605 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3606 dmae->dst_addr_hi = 0;
3607 dmae->len = sizeof(struct host_port_stats) >> 2;
3608 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3609 dmae->comp_addr_hi = 0;
3615 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3616 dmae->opcode = opcode;
3617 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3618 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3619 dmae->dst_addr_lo = bp->func_stx >> 2;
3620 dmae->dst_addr_hi = 0;
3621 dmae->len = sizeof(struct host_func_stats) >> 2;
3622 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3623 dmae->comp_addr_hi = 0;
3628 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3629 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3630 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3632 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3634 DMAE_CMD_ENDIANITY_DW_SWAP |
3636 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3637 (vn << DMAE_CMD_E1HVN_SHIFT));
3639 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3641 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3642 NIG_REG_INGRESS_BMAC0_MEM);
3644 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3645 BIGMAC_REGISTER_TX_STAT_GTBYT */
3646 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3647 dmae->opcode = opcode;
3648 dmae->src_addr_lo = (mac_addr +
3649 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3650 dmae->src_addr_hi = 0;
3651 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3653 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3654 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3655 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3656 dmae->comp_addr_hi = 0;
3659 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3660 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3661 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3662 dmae->opcode = opcode;
3663 dmae->src_addr_lo = (mac_addr +
3664 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3665 dmae->src_addr_hi = 0;
3666 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3667 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3668 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3669 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3670 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3671 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3673 dmae->comp_addr_hi = 0;
3676 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3678 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3680 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3681 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3682 dmae->opcode = opcode;
3683 dmae->src_addr_lo = (mac_addr +
3684 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3685 dmae->src_addr_hi = 0;
3686 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3687 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3688 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690 dmae->comp_addr_hi = 0;
3693 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3694 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3695 dmae->opcode = opcode;
3696 dmae->src_addr_lo = (mac_addr +
3697 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3698 dmae->src_addr_hi = 0;
3699 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3700 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3702 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3704 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705 dmae->comp_addr_hi = 0;
3708 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (mac_addr +
3712 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3715 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3717 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3718 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3719 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720 dmae->comp_addr_hi = 0;
3725 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3726 dmae->opcode = opcode;
3727 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3728 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3729 dmae->src_addr_hi = 0;
3730 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3731 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3732 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3733 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3734 dmae->comp_addr_hi = 0;
3737 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3738 dmae->opcode = opcode;
3739 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3740 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3741 dmae->src_addr_hi = 0;
3742 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3743 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3744 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3745 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3746 dmae->len = (2*sizeof(u32)) >> 2;
3747 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3748 dmae->comp_addr_hi = 0;
3751 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3752 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3753 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3754 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3756 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3758 DMAE_CMD_ENDIANITY_DW_SWAP |
3760 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3761 (vn << DMAE_CMD_E1HVN_SHIFT));
3762 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3763 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3764 dmae->src_addr_hi = 0;
3765 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3766 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3767 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3768 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3769 dmae->len = (2*sizeof(u32)) >> 2;
3770 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3771 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3772 dmae->comp_val = DMAE_COMP_VAL;
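/* Note the completion addresses used above: every DMAE command except
 * the last completes to a GRC "go" register (dmae_reg_go_c[...]),
 * which triggers the next command in the chain; only the final
 * command completes to host memory with DMAE_COMP_VAL so that
 * bnx2x_stats_comp() can poll once for the whole chain.
 */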
3777 static void bnx2x_func_stats_init(struct bnx2x *bp)
3779 struct dmae_command *dmae = &bp->stats_dmae;
3780 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3783 if (!bp->func_stx) {
3784 BNX2X_ERR("BUG!\n");
3788 bp->executer_idx = 0;
3789 memset(dmae, 0, sizeof(struct dmae_command));
3791 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3792 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3793 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3795 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3797 DMAE_CMD_ENDIANITY_DW_SWAP |
3799 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3800 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3801 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3802 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3803 dmae->dst_addr_lo = bp->func_stx >> 2;
3804 dmae->dst_addr_hi = 0;
3805 dmae->len = sizeof(struct host_func_stats) >> 2;
3806 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3807 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3808 dmae->comp_val = DMAE_COMP_VAL;
3813 static void bnx2x_stats_start(struct bnx2x *bp)
3816 bnx2x_port_stats_init(bp);
3818 else if (bp->func_stx)
3819 bnx2x_func_stats_init(bp);
3821 bnx2x_hw_stats_post(bp);
3822 bnx2x_storm_stats_post(bp);
3825 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3827 bnx2x_stats_comp(bp);
3828 bnx2x_stats_pmf_update(bp);
3829 bnx2x_stats_start(bp);
3832 static void bnx2x_stats_restart(struct bnx2x *bp)
3834 bnx2x_stats_comp(bp);
3835 bnx2x_stats_start(bp);
3838 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3840 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3841 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3842 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3848 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3849 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3850 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3851 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3852 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3853 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3854 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3855 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3856 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3857 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3858 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3859 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3860 UPDATE_STAT64(tx_stat_gt127,
3861 tx_stat_etherstatspkts65octetsto127octets);
3862 UPDATE_STAT64(tx_stat_gt255,
3863 tx_stat_etherstatspkts128octetsto255octets);
3864 UPDATE_STAT64(tx_stat_gt511,
3865 tx_stat_etherstatspkts256octetsto511octets);
3866 UPDATE_STAT64(tx_stat_gt1023,
3867 tx_stat_etherstatspkts512octetsto1023octets);
3868 UPDATE_STAT64(tx_stat_gt1518,
3869 tx_stat_etherstatspkts1024octetsto1522octets);
3870 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3871 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3872 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3873 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3874 UPDATE_STAT64(tx_stat_gterr,
3875 tx_stat_dot3statsinternalmactransmiterrors);
3876 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3878 estats->pause_frames_received_hi =
3879 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3880 estats->pause_frames_received_lo =
3881 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3883 estats->pause_frames_sent_hi =
3884 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3885 estats->pause_frames_sent_lo =
3886 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3889 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3891 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3892 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3893 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3896 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3897 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3898 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3899 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3900 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3901 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3902 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3903 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3904 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3905 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3906 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3907 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3908 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3909 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3910 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3911 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3912 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3913 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3914 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3915 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3916 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3917 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3918 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3919 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3920 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3921 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3922 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3923 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3924 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3925 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3927 estats->pause_frames_received_hi =
3928 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3929 estats->pause_frames_received_lo =
3930 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3931 ADD_64(estats->pause_frames_received_hi,
3932 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3933 estats->pause_frames_received_lo,
3934 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3936 estats->pause_frames_sent_hi =
3937 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3938 estats->pause_frames_sent_lo =
3939 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3940 ADD_64(estats->pause_frames_sent_hi,
3941 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3942 estats->pause_frames_sent_lo,
3943 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3946 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3948 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3949 struct nig_stats *old = &(bp->port.old_nig_stats);
3950 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3951 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3958 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3959 bnx2x_bmac_stats_update(bp);
3961 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3962 bnx2x_emac_stats_update(bp);
3964 else { /* unreached */
3965 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3969 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3970 new->brb_discard - old->brb_discard);
3971 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3972 new->brb_truncate - old->brb_truncate);
3974 UPDATE_STAT64_NIG(egress_mac_pkt0,
3975 etherstatspkts1024octetsto1522octets);
3976 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3978 memcpy(old, new, sizeof(struct nig_stats));
3980 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3981 sizeof(struct mac_stx));
3982 estats->brb_drop_hi = pstats->brb_drop_hi;
3983 estats->brb_drop_lo = pstats->brb_drop_lo;
3985 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3987 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3988 if (nig_timer_max != estats->nig_timer_max) {
3989 estats->nig_timer_max = nig_timer_max;
3990 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3996 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3998 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3999 struct tstorm_per_port_stats *tport =
4000 &stats->tstorm_common.port_statistics;
4001 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4002 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4005 memcpy(&(fstats->total_bytes_received_hi),
4006 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4007 sizeof(struct host_func_stats) - 2*sizeof(u32));
4008 estats->error_bytes_received_hi = 0;
4009 estats->error_bytes_received_lo = 0;
4010 estats->etherstatsoverrsizepkts_hi = 0;
4011 estats->etherstatsoverrsizepkts_lo = 0;
4012 estats->no_buff_discard_hi = 0;
4013 estats->no_buff_discard_lo = 0;
4015 for_each_rx_queue(bp, i) {
4016 struct bnx2x_fastpath *fp = &bp->fp[i];
4017 int cl_id = fp->cl_id;
4018 struct tstorm_per_client_stats *tclient =
4019 &stats->tstorm_common.client_statistics[cl_id];
4020 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4021 struct ustorm_per_client_stats *uclient =
4022 &stats->ustorm_common.client_statistics[cl_id];
4023 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4024 struct xstorm_per_client_stats *xclient =
4025 &stats->xstorm_common.client_statistics[cl_id];
4026 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4027 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4030 /* are storm stats valid? */
4031 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4032 bp->stats_counter) {
4033 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4034 " xstorm counter (%d) != stats_counter (%d)\n",
4035 i, xclient->stats_counter, bp->stats_counter);
4038 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4039 bp->stats_counter) {
4040 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4041 " tstorm counter (%d) != stats_counter (%d)\n",
4042 i, tclient->stats_counter, bp->stats_counter);
4045 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4046 bp->stats_counter) {
4047 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4048 " ustorm counter (%d) != stats_counter (%d)\n",
4049 i, uclient->stats_counter, bp->stats_counter);
4053 qstats->total_bytes_received_hi =
4054 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4055 qstats->total_bytes_received_lo =
4056 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4058 ADD_64(qstats->total_bytes_received_hi,
4059 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4060 qstats->total_bytes_received_lo,
4061 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4063 ADD_64(qstats->total_bytes_received_hi,
4064 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4065 qstats->total_bytes_received_lo,
4066 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4068 qstats->valid_bytes_received_hi =
4069 qstats->total_bytes_received_hi;
4070 qstats->valid_bytes_received_lo =
4071 qstats->total_bytes_received_lo;
4073 qstats->error_bytes_received_hi =
4074 le32_to_cpu(tclient->rcv_error_bytes.hi);
4075 qstats->error_bytes_received_lo =
4076 le32_to_cpu(tclient->rcv_error_bytes.lo);
4078 ADD_64(qstats->total_bytes_received_hi,
4079 qstats->error_bytes_received_hi,
4080 qstats->total_bytes_received_lo,
4081 qstats->error_bytes_received_lo);
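/* total_bytes_received is accumulated above as broadcast + multicast
 * + unicast bytes plus error bytes, while valid_bytes_received was
 * snapshotted before the error bytes were folded in, so
 * valid == total - errors.
 */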
4083 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4084 total_unicast_packets_received);
4085 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4086 total_multicast_packets_received);
4087 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4088 total_broadcast_packets_received);
4089 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4090 etherstatsoverrsizepkts);
4091 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4093 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4094 total_unicast_packets_received);
4095 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4096 total_multicast_packets_received);
4097 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4098 total_broadcast_packets_received);
4099 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4100 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4101 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
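/* The ustorm no-buffer counts are applied twice on purpose: first
 * subtracted from the per-type received totals (a packet dropped for
 * lack of buffers must not count as received), then accumulated into
 * the no_buff_discard statistic.
 */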
4103 qstats->total_bytes_transmitted_hi =
4104 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4105 qstats->total_bytes_transmitted_lo =
4106 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4108 ADD_64(qstats->total_bytes_transmitted_hi,
4109 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4110 qstats->total_bytes_transmitted_lo,
4111 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4113 ADD_64(qstats->total_bytes_transmitted_hi,
4114 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4115 qstats->total_bytes_transmitted_lo,
4116 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4118 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4119 total_unicast_packets_transmitted);
4120 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4121 total_multicast_packets_transmitted);
4122 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4123 total_broadcast_packets_transmitted);
4125 old_tclient->checksum_discard = tclient->checksum_discard;
4126 old_tclient->ttl0_discard = tclient->ttl0_discard;
4128 ADD_64(fstats->total_bytes_received_hi,
4129 qstats->total_bytes_received_hi,
4130 fstats->total_bytes_received_lo,
4131 qstats->total_bytes_received_lo);
4132 ADD_64(fstats->total_bytes_transmitted_hi,
4133 qstats->total_bytes_transmitted_hi,
4134 fstats->total_bytes_transmitted_lo,
4135 qstats->total_bytes_transmitted_lo);
4136 ADD_64(fstats->total_unicast_packets_received_hi,
4137 qstats->total_unicast_packets_received_hi,
4138 fstats->total_unicast_packets_received_lo,
4139 qstats->total_unicast_packets_received_lo);
4140 ADD_64(fstats->total_multicast_packets_received_hi,
4141 qstats->total_multicast_packets_received_hi,
4142 fstats->total_multicast_packets_received_lo,
4143 qstats->total_multicast_packets_received_lo);
4144 ADD_64(fstats->total_broadcast_packets_received_hi,
4145 qstats->total_broadcast_packets_received_hi,
4146 fstats->total_broadcast_packets_received_lo,
4147 qstats->total_broadcast_packets_received_lo);
4148 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4149 qstats->total_unicast_packets_transmitted_hi,
4150 fstats->total_unicast_packets_transmitted_lo,
4151 qstats->total_unicast_packets_transmitted_lo);
4152 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4153 qstats->total_multicast_packets_transmitted_hi,
4154 fstats->total_multicast_packets_transmitted_lo,
4155 qstats->total_multicast_packets_transmitted_lo);
4156 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4157 qstats->total_broadcast_packets_transmitted_hi,
4158 fstats->total_broadcast_packets_transmitted_lo,
4159 qstats->total_broadcast_packets_transmitted_lo);
4160 ADD_64(fstats->valid_bytes_received_hi,
4161 qstats->valid_bytes_received_hi,
4162 fstats->valid_bytes_received_lo,
4163 qstats->valid_bytes_received_lo);
4165 ADD_64(estats->error_bytes_received_hi,
4166 qstats->error_bytes_received_hi,
4167 estats->error_bytes_received_lo,
4168 qstats->error_bytes_received_lo);
4169 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170 qstats->etherstatsoverrsizepkts_hi,
4171 estats->etherstatsoverrsizepkts_lo,
4172 qstats->etherstatsoverrsizepkts_lo);
4173 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4174 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4177 ADD_64(fstats->total_bytes_received_hi,
4178 estats->rx_stat_ifhcinbadoctets_hi,
4179 fstats->total_bytes_received_lo,
4180 estats->rx_stat_ifhcinbadoctets_lo);
4182 memcpy(estats, &(fstats->total_bytes_received_hi),
4183 sizeof(struct host_func_stats) - 2*sizeof(u32));
4185 ADD_64(estats->etherstatsoverrsizepkts_hi,
4186 estats->rx_stat_dot3statsframestoolong_hi,
4187 estats->etherstatsoverrsizepkts_lo,
4188 estats->rx_stat_dot3statsframestoolong_lo);
4189 ADD_64(estats->error_bytes_received_hi,
4190 estats->rx_stat_ifhcinbadoctets_hi,
4191 estats->error_bytes_received_lo,
4192 estats->rx_stat_ifhcinbadoctets_lo);
4195 estats->mac_filter_discard =
4196 le32_to_cpu(tport->mac_filter_discard);
4197 estats->xxoverflow_discard =
4198 le32_to_cpu(tport->xxoverflow_discard);
4199 estats->brb_truncate_discard =
4200 le32_to_cpu(tport->brb_truncate_discard);
4201 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4204 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4206 bp->stats_pending = 0;
4211 static void bnx2x_net_stats_update(struct bnx2x *bp)
4213 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4214 struct net_device_stats *nstats = &bp->dev->stats;
4217 nstats->rx_packets =
4218 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4219 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4220 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4222 nstats->tx_packets =
4223 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4224 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4225 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4227 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4229 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4231 nstats->rx_dropped = estats->mac_discard;
4232 for_each_rx_queue(bp, i)
4233 nstats->rx_dropped +=
4234 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4236 nstats->tx_dropped = 0;
4238 nstats->multicast =
4239 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4241 nstats->collisions =
4242 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4244 nstats->rx_length_errors =
4245 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4246 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4247 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4248 bnx2x_hilo(&estats->brb_truncate_hi);
4249 nstats->rx_crc_errors =
4250 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4251 nstats->rx_frame_errors =
4252 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4253 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4254 nstats->rx_missed_errors = estats->xxoverflow_discard;
4256 nstats->rx_errors = nstats->rx_length_errors +
4257 nstats->rx_over_errors +
4258 nstats->rx_crc_errors +
4259 nstats->rx_frame_errors +
4260 nstats->rx_fifo_errors +
4261 nstats->rx_missed_errors;
4263 nstats->tx_aborted_errors =
4264 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4265 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4266 nstats->tx_carrier_errors =
4267 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4268 nstats->tx_fifo_errors = 0;
4269 nstats->tx_heartbeat_errors = 0;
4270 nstats->tx_window_errors = 0;
4272 nstats->tx_errors = nstats->tx_aborted_errors +
4273 nstats->tx_carrier_errors +
4274 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4277 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4279 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4282 estats->driver_xoff = 0;
4283 estats->rx_err_discard_pkt = 0;
4284 estats->rx_skb_alloc_failed = 0;
4285 estats->hw_csum_err = 0;
4286 for_each_rx_queue(bp, i) {
4287 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4289 estats->driver_xoff += qstats->driver_xoff;
4290 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4291 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4292 estats->hw_csum_err += qstats->hw_csum_err;
4296 static void bnx2x_stats_update(struct bnx2x *bp)
4298 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4300 if (*stats_comp != DMAE_COMP_VAL)
4301 return;
4303 if (bp->port.pmf)
4304 bnx2x_hw_stats_update(bp);
4306 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4307 BNX2X_ERR("storm stats were not updated for 3 times\n");
4308 bnx2x_panic();
4309 return;
4310 }
4312 bnx2x_net_stats_update(bp);
4313 bnx2x_drv_stats_update(bp);
4315 if (bp->msglevel & NETIF_MSG_TIMER) {
4316 struct bnx2x_fastpath *fp0_rx = bp->fp;
4317 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4318 struct tstorm_per_client_stats *old_tclient =
4319 &bp->fp->old_tclient;
4320 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4321 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4322 struct net_device_stats *nstats = &bp->dev->stats;
4325 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4326 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4327 " tx pkt (%lx)\n",
4328 bnx2x_tx_avail(fp0_tx),
4329 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4330 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4331 " rx pkt (%lx)\n",
4332 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4333 fp0_rx->rx_comp_cons),
4334 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4335 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4336 "brb truncate %u\n",
4337 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4338 qstats->driver_xoff,
4339 estats->brb_drop_lo, estats->brb_truncate_lo);
4340 printk(KERN_DEBUG "tstats: checksum_discard %u "
4341 "packets_too_big_discard %lu no_buff_discard %lu "
4342 "mac_discard %u mac_filter_discard %u "
4343 "xxovrflow_discard %u brb_truncate_discard %u "
4344 "ttl0_discard %u\n",
4345 le32_to_cpu(old_tclient->checksum_discard),
4346 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4347 bnx2x_hilo(&qstats->no_buff_discard_hi),
4348 estats->mac_discard, estats->mac_filter_discard,
4349 estats->xxoverflow_discard, estats->brb_truncate_discard,
4350 le32_to_cpu(old_tclient->ttl0_discard));
4352 for_each_queue(bp, i) {
4353 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4354 bnx2x_fp(bp, i, tx_pkt),
4355 bnx2x_fp(bp, i, rx_pkt),
4356 bnx2x_fp(bp, i, rx_calls));
4360 bnx2x_hw_stats_post(bp);
4361 bnx2x_storm_stats_post(bp);
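/*
 * bnx2x_stats_update() is driven by the periodic timer: it consumes the
 * previous completion (the DMAE engine writes DMAE_COMP_VAL into the
 * stats_comp word), folds the HW and storm counters into eth_stats, and
 * immediately re-posts both queries - bnx2x_hw_stats_post() kicks a DMAE
 * transfer of the MAC/NIG statistics blocks while bnx2x_storm_stats_post()
 * sends a statistics-query ramrod to the storms - so a fresh snapshot is
 * ready for the next tick.
 */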
4364 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4366 struct dmae_command *dmae;
4367 u32 opcode;
4368 int loader_idx = PMF_DMAE_C(bp);
4369 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4371 bp->executer_idx = 0;
4373 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4374 DMAE_CMD_C_ENABLE |
4375 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4376 #ifdef __BIG_ENDIAN
4377 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4378 #else
4379 DMAE_CMD_ENDIANITY_DW_SWAP |
4380 #endif
4381 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4382 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4384 if (bp->port.port_stx) {
4386 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4387 if (bp->func_stx)
4388 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4389 else
4390 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4391 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4392 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4393 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4394 dmae->dst_addr_hi = 0;
4395 dmae->len = sizeof(struct host_port_stats) >> 2;
4396 if (bp->func_stx) {
4397 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4398 dmae->comp_addr_hi = 0;
4399 dmae->comp_val = 1;
4400 } else {
4401 dmae->comp_addr_lo =
4402 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4403 dmae->comp_addr_hi =
4404 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_val = DMAE_COMP_VAL;
4406 }
4407 }
4410 if (bp->func_stx) {
4413 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4414 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4415 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4416 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4417 dmae->dst_addr_lo = bp->func_stx >> 2;
4418 dmae->dst_addr_hi = 0;
4419 dmae->len = sizeof(struct host_func_stats) >> 2;
4420 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4421 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4422 dmae->comp_val = DMAE_COMP_VAL;
4428 static void bnx2x_stats_stop(struct bnx2x *bp)
4430 int update = 0;
4432 bnx2x_stats_comp(bp);
4434 if (bp->port.pmf)
4435 update = (bnx2x_hw_stats_update(bp) == 0);
4437 update |= (bnx2x_storm_stats_update(bp) == 0);
4439 if (update) {
4440 bnx2x_net_stats_update(bp);
4442 if (bp->port.pmf)
4443 bnx2x_port_stats_stop(bp);
4445 bnx2x_hw_stats_post(bp);
4446 bnx2x_stats_comp(bp);
4447 }
4450 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4454 static const struct {
4455 void (*action)(struct bnx2x *bp);
4456 enum bnx2x_stats_state next_state;
4457 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4458 /* state	event	*/
4459 {
4460 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4461 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4462 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4463 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4464 },
4465 {
4466 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4467 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4468 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4469 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4470 }
4471 };
4473 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4475 enum bnx2x_stats_state state = bp->stats_state;
4477 bnx2x_stats_stm[state][event].action(bp);
4478 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4480 /* Make sure the state has been "changed" */
4481 smp_wmb();
4483 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4484 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4485 state, event, bp->stats_state);
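/*
 * The 2x4 table above is a flat state machine: the current state
 * (DISABLED/ENABLED) and the incoming event (PMF, LINK_UP, UPDATE, STOP)
 * select both an action and the next state. For example, a link-up in
 * STATS_STATE_DISABLED runs bnx2x_stats_start() and moves to
 * STATS_STATE_ENABLED, after which periodic STATS_EVENT_UPDATE events
 * keep invoking bnx2x_stats_update() without changing state:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */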
4488 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4490 struct dmae_command *dmae;
4491 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4494 if (!bp->port.pmf || !bp->port.port_stx) {
4495 BNX2X_ERR("BUG!\n");
4496 return;
4497 }
4499 bp->executer_idx = 0;
4501 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4502 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4503 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4504 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4505 #ifdef __BIG_ENDIAN
4506 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4507 #else
4508 DMAE_CMD_ENDIANITY_DW_SWAP |
4509 #endif
4510 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4511 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4512 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4513 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4514 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4515 dmae->dst_addr_hi = 0;
4516 dmae->len = sizeof(struct host_port_stats) >> 2;
4517 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4518 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4519 dmae->comp_val = DMAE_COMP_VAL;
4522 bnx2x_hw_stats_post(bp);
4523 bnx2x_stats_comp(bp);
4526 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4528 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4529 int port = BP_PORT(bp);
4530 int func;
4531 u32 func_stx;
4533 /* sanity */
4534 if (!bp->port.pmf || !bp->func_stx) {
4535 BNX2X_ERR("BUG!\n");
4536 return;
4537 }
4539 /* save our func_stx */
4540 func_stx = bp->func_stx;
4542 for (vn = VN_0; vn < vn_max; vn++) {
4543 func = 2*vn + port;
4545 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4546 bnx2x_func_stats_init(bp);
4547 bnx2x_hw_stats_post(bp);
4548 bnx2x_stats_comp(bp);
4551 /* restore our func_stx */
4552 bp->func_stx = func_stx;
4555 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4557 struct dmae_command *dmae = &bp->stats_dmae;
4558 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4561 if (!bp->func_stx) {
4562 BNX2X_ERR("BUG!\n");
4563 return;
4564 }
4566 bp->executer_idx = 0;
4567 memset(dmae, 0, sizeof(struct dmae_command));
4569 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4570 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4571 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4572 #ifdef __BIG_ENDIAN
4573 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4574 #else
4575 DMAE_CMD_ENDIANITY_DW_SWAP |
4576 #endif
4577 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4578 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4579 dmae->src_addr_lo = bp->func_stx >> 2;
4580 dmae->src_addr_hi = 0;
4581 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4582 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4583 dmae->len = sizeof(struct host_func_stats) >> 2;
4584 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4585 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4586 dmae->comp_val = DMAE_COMP_VAL;
4589 bnx2x_hw_stats_post(bp);
4590 bnx2x_stats_comp(bp);
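/*
 * All three *_stats_base_* helpers above build the same kind of DMAE
 * descriptor by hand: source/destination addresses and the length are
 * expressed in 32-bit words (hence the ">> 2" on GRC offsets and the
 * "sizeof(...) >> 2" lengths), and completion is signalled either by
 * chaining to the next loader channel (C_DST_GRC) or by the engine
 * writing DMAE_COMP_VAL into the stats_comp word that
 * bnx2x_stats_comp() polls.
 */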
4593 static void bnx2x_stats_init(struct bnx2x *bp)
4595 int port = BP_PORT(bp);
4596 int func = BP_FUNC(bp);
4599 bp->stats_pending = 0;
4600 bp->executer_idx = 0;
4601 bp->stats_counter = 0;
4603 /* port and func stats for management */
4604 if (!BP_NOMCP(bp)) {
4605 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4606 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4608 } else {
4609 bp->port.port_stx = 0;
4610 bp->func_stx = 0;
4611 }
4612 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4613 bp->port.port_stx, bp->func_stx);
4616 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4617 bp->port.old_nig_stats.brb_discard =
4618 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4619 bp->port.old_nig_stats.brb_truncate =
4620 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4621 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4622 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4623 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4624 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4626 /* function stats */
4627 for_each_queue(bp, i) {
4628 struct bnx2x_fastpath *fp = &bp->fp[i];
4630 memset(&fp->old_tclient, 0,
4631 sizeof(struct tstorm_per_client_stats));
4632 memset(&fp->old_uclient, 0,
4633 sizeof(struct ustorm_per_client_stats));
4634 memset(&fp->old_xclient, 0,
4635 sizeof(struct xstorm_per_client_stats));
4636 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4639 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4640 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4642 bp->stats_state = STATS_STATE_DISABLED;
4644 if (bp->port.pmf) {
4645 if (bp->port.port_stx)
4646 bnx2x_port_stats_base_init(bp);
4648 if (bp->func_stx)
4649 bnx2x_func_stats_base_init(bp);
4651 } else if (bp->func_stx)
4652 bnx2x_func_stats_base_update(bp);
4655 static void bnx2x_timer(unsigned long data)
4657 struct bnx2x *bp = (struct bnx2x *) data;
4659 if (!netif_running(bp->dev))
4660 return;
4662 if (atomic_read(&bp->intr_sem) != 0)
4663 goto timer_restart;
4665 if (poll) {
4666 struct bnx2x_fastpath *fp = &bp->fp[0];
4667 int rc;
4669 bnx2x_tx_int(fp);
4670 rc = bnx2x_rx_int(fp, 1000);
4671 }
4673 if (!BP_NOMCP(bp)) {
4674 int func = BP_FUNC(bp);
4675 u32 drv_pulse;
4676 u32 mcp_pulse;
4678 ++bp->fw_drv_pulse_wr_seq;
4679 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4680 /* TBD - add SYSTEM_TIME */
4681 drv_pulse = bp->fw_drv_pulse_wr_seq;
4682 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4684 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4685 MCP_PULSE_SEQ_MASK);
4686 /* The delta between driver pulse and mcp response
4687 * should be 1 (before mcp response) or 0 (after mcp response)
4688 */
4689 if ((drv_pulse != mcp_pulse) &&
4690 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4691 /* someone lost a heartbeat... */
4692 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4693 drv_pulse, mcp_pulse);
4697 if ((bp->state == BNX2X_STATE_OPEN) ||
4698 (bp->state == BNX2X_STATE_DISABLED))
4699 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4701 timer_restart:
4702 mod_timer(&bp->timer, jiffies + bp->current_interval);
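/*
 * The drv_pulse/mcp_pulse exchange above is a heartbeat: the driver
 * writes an incrementing sequence number into its SHMEM mailbox and the
 * management CPU echoes it back, so at any instant the two values may
 * differ by at most one (driver ahead before the MCP responds). For
 * example, after the driver writes 0x05 the MCP is expected to show
 * 0x04 or 0x05; any other delta means one side stopped beating and is
 * logged as an error.
 */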
4705 /* end of Statistics */
4707 /* nic init */
4709 /*
4710 * nic init service functions
4711 */
4713 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4715 int port = BP_PORT(bp);
4718 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4719 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4720 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4721 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4722 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4723 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4726 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4727 dma_addr_t mapping, int sb_id)
4729 int port = BP_PORT(bp);
4730 int func = BP_FUNC(bp);
4735 section = ((u64)mapping) + offsetof(struct host_status_block,
4736 u_status_block);
4737 sb->u_status_block.status_block_id = sb_id;
4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
4740 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4741 REG_WR(bp, BAR_CSTRORM_INTMEM +
4742 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4743 U64_HI(section));
4744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4747 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4752 section = ((u64)mapping) + offsetof(struct host_status_block,
4753 c_status_block);
4754 sb->c_status_block.status_block_id = sb_id;
4756 REG_WR(bp, BAR_CSTRORM_INTMEM +
4757 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4758 REG_WR(bp, BAR_CSTRORM_INTMEM +
4759 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4760 U64_HI(section));
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4762 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4764 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4765 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4766 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4768 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
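/*
 * A non-default host status block has two halves - a "U" part carrying
 * the Rx indices and a "C" part carrying the Tx indices, both hosted in
 * CSTORM internal memory here. The code above points each half at its
 * DMA mapping, tags it with the function number, and starts with every
 * index disabled (HC_DISABLE = 1); bnx2x_update_coalesce() later enables
 * the indices that are actually in use.
 */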
4771 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4773 int func = BP_FUNC(bp);
4775 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4776 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4777 sizeof(struct tstorm_def_status_block)/4);
4778 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4779 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4780 sizeof(struct cstorm_def_status_block_u)/4);
4781 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4782 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4783 sizeof(struct cstorm_def_status_block_c)/4);
4784 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4785 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4786 sizeof(struct xstorm_def_status_block)/4);
4789 static void bnx2x_init_def_sb(struct bnx2x *bp,
4790 struct host_def_status_block *def_sb,
4791 dma_addr_t mapping, int sb_id)
4793 int port = BP_PORT(bp);
4794 int func = BP_FUNC(bp);
4795 int index, val, reg_offset;
4799 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4800 atten_status_block);
4801 def_sb->atten_status_block.status_block_id = sb_id;
4805 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4806 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4808 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4809 bp->attn_group[index].sig[0] = REG_RD(bp,
4810 reg_offset + 0x10*index);
4811 bp->attn_group[index].sig[1] = REG_RD(bp,
4812 reg_offset + 0x4 + 0x10*index);
4813 bp->attn_group[index].sig[2] = REG_RD(bp,
4814 reg_offset + 0x8 + 0x10*index);
4815 bp->attn_group[index].sig[3] = REG_RD(bp,
4816 reg_offset + 0xc + 0x10*index);
4819 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4820 HC_REG_ATTN_MSG0_ADDR_L);
4822 REG_WR(bp, reg_offset, U64_LO(section));
4823 REG_WR(bp, reg_offset + 4, U64_HI(section));
4825 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4827 val = REG_RD(bp, reg_offset);
4828 val |= sb_id;
4829 REG_WR(bp, reg_offset, val);
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 u_def_status_block);
4834 def_sb->u_def_status_block.status_block_id = sb_id;
4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
4837 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4838 REG_WR(bp, BAR_CSTRORM_INTMEM +
4839 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4840 U64_HI(section));
4841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4844 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 c_def_status_block);
4851 def_sb->c_def_status_block.status_block_id = sb_id;
4853 REG_WR(bp, BAR_CSTRORM_INTMEM +
4854 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4855 REG_WR(bp, BAR_CSTRORM_INTMEM +
4856 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4857 U64_HI(section));
4858 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4859 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4861 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4863 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867 t_def_status_block);
4868 def_sb->t_def_status_block.status_block_id = sb_id;
4870 REG_WR(bp, BAR_TSTRORM_INTMEM +
4871 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872 REG_WR(bp, BAR_TSTRORM_INTMEM +
4873 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874 U64_HI(section));
4875 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4876 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4879 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4880 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4883 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4884 x_def_status_block);
4885 def_sb->x_def_status_block.status_block_id = sb_id;
4887 REG_WR(bp, BAR_XSTRORM_INTMEM +
4888 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4889 REG_WR(bp, BAR_XSTRORM_INTMEM +
4890 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4891 U64_HI(section));
4892 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4893 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4895 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4896 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4897 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4899 bp->stats_pending = 0;
4900 bp->set_mac_pending = 0;
4902 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4905 static void bnx2x_update_coalesce(struct bnx2x *bp)
4907 int port = BP_PORT(bp);
4910 for_each_queue(bp, i) {
4911 int sb_id = bp->fp[i].sb_id;
4913 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4914 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4915 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4916 U_SB_ETH_RX_CQ_INDEX),
4917 bp->rx_ticks/12);
4918 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4919 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4920 U_SB_ETH_RX_CQ_INDEX),
4921 (bp->rx_ticks/12) ? 0 : 1);
4923 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4924 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4925 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4926 C_SB_ETH_TX_CQ_INDEX),
4927 bp->tx_ticks/12);
4928 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4929 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4930 C_SB_ETH_TX_CQ_INDEX),
4931 (bp->tx_ticks/12) ? 0 : 1);
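/*
 * bp->rx_ticks/bp->tx_ticks are kept in microseconds; the division by 12
 * matches the host coalescing timer resolution (one HC timeout tick
 * appears to be about 12us on this chip), and a value that divides down
 * to zero simply disables the index again via the REG_WR16 of 1 above.
 */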
4935 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4936 struct bnx2x_fastpath *fp, int last)
4940 for (i = 0; i < last; i++) {
4941 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4942 struct sk_buff *skb = rx_buf->skb;
4944 if (skb == NULL) {
4945 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4946 continue;
4947 }
4949 if (fp->tpa_state[i] == BNX2X_TPA_START)
4950 pci_unmap_single(bp->pdev,
4951 pci_unmap_addr(rx_buf, mapping),
4952 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4954 dev_kfree_skb(skb);
4955 rx_buf->skb = NULL;
4959 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4961 int func = BP_FUNC(bp);
4962 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4963 ETH_MAX_AGGREGATION_QUEUES_E1H;
4964 u16 ring_prod, cqe_ring_prod;
4967 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4968 DP(NETIF_MSG_IFUP,
4969 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4971 if (bp->flags & TPA_ENABLE_FLAG) {
4973 for_each_rx_queue(bp, j) {
4974 struct bnx2x_fastpath *fp = &bp->fp[j];
4976 for (i = 0; i < max_agg_queues; i++) {
4977 fp->tpa_pool[i].skb =
4978 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4979 if (!fp->tpa_pool[i].skb) {
4980 BNX2X_ERR("Failed to allocate TPA "
4981 "skb pool for queue[%d] - "
4982 "disabling TPA on this "
4983 "queue!\n", j);
4984 bnx2x_free_tpa_pool(bp, fp, i);
4985 fp->disable_tpa = 1;
4986 break;
4987 }
4988 pci_unmap_addr_set((struct sw_rx_bd *)
4989 &bp->fp->tpa_pool[i],
4990 mapping, 0);
4991 fp->tpa_state[i] = BNX2X_TPA_STOP;
4996 for_each_rx_queue(bp, j) {
4997 struct bnx2x_fastpath *fp = &bp->fp[j];
5000 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5001 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5003 /* Mark queue as Rx */
5004 fp->is_rx_queue = 1;
5006 /* "next page" elements initialization */
5008 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5009 struct eth_rx_sge *sge;
5011 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5013 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5014 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5016 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5017 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5020 bnx2x_init_sge_ring_bit_mask(fp);
5023 for (i = 1; i <= NUM_RX_RINGS; i++) {
5024 struct eth_rx_bd *rx_bd;
5026 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5027 rx_bd->addr_hi =
5028 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5029 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5030 rx_bd->addr_lo =
5031 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5032 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5036 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5037 struct eth_rx_cqe_next_page *nextpg;
5039 nextpg = (struct eth_rx_cqe_next_page *)
5040 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5041 nextpg->addr_hi =
5042 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5043 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5044 nextpg->addr_lo =
5045 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5046 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5049 /* Allocate SGEs and initialize the ring elements */
5050 for (i = 0, ring_prod = 0;
5051 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5053 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5054 BNX2X_ERR("was only able to allocate "
5055 "%d rx sges\n", i);
5056 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5057 /* Cleanup already allocated elements */
5058 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5059 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5060 fp->disable_tpa = 1;
5061 ring_prod = 0;
5062 break;
5063 }
5064 ring_prod = NEXT_SGE_IDX(ring_prod);
5066 fp->rx_sge_prod = ring_prod;
5068 /* Allocate BDs and initialize BD ring */
5069 fp->rx_comp_cons = 0;
5070 cqe_ring_prod = ring_prod = 0;
5071 for (i = 0; i < bp->rx_ring_size; i++) {
5072 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5073 BNX2X_ERR("was only able to allocate "
5074 "%d rx skbs on queue[%d]\n", i, j);
5075 fp->eth_q_stats.rx_skb_alloc_failed++;
5076 break;
5077 }
5078 ring_prod = NEXT_RX_IDX(ring_prod);
5079 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5080 WARN_ON(ring_prod <= i);
5083 fp->rx_bd_prod = ring_prod;
5084 /* must not have more available CQEs than BDs */
5085 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5086 cqe_ring_prod);
5087 fp->rx_pkt = fp->rx_calls = 0;
5089 /* Warning!
5090 * this will generate an interrupt (to the TSTORM)
5091 * must only be done after chip is initialized
5092 */
5093 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5094 fp->rx_sge_prod);
5095 if (j != 0)
5096 continue;
5098 REG_WR(bp, BAR_USTRORM_INTMEM +
5099 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5100 U64_LO(fp->rx_comp_mapping));
5101 REG_WR(bp, BAR_USTRORM_INTMEM +
5102 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5103 U64_HI(fp->rx_comp_mapping));
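/*
 * Each Rx queue is really three rings: the BD ring (buffer descriptors
 * for the skbs), the SGE ring (page-sized scatter buffers used only for
 * TPA aggregation) and the RCQ completion ring. Every ring reserves its
 * last element(s) of each page as a "next page" pointer back into the
 * ring's own page list, which is what the "RX_DESC_CNT * i - 2" and
 * "RCQ_DESC_CNT * i - 1" arithmetic above is wiring up.
 */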
5107 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5111 for_each_tx_queue(bp, j) {
5112 struct bnx2x_fastpath *fp = &bp->fp[j];
5114 for (i = 1; i <= NUM_TX_RINGS; i++) {
5115 struct eth_tx_next_bd *tx_next_bd =
5116 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5118 tx_next_bd->addr_hi =
5119 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5120 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5121 tx_next_bd->addr_lo =
5122 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5123 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5126 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5127 fp->tx_db.data.zero_fill1 = 0;
5128 fp->tx_db.data.prod = 0;
5130 fp->tx_pkt_prod = 0;
5131 fp->tx_pkt_cons = 0;
5132 fp->tx_bd_prod = 0;
5133 fp->tx_bd_cons = 0;
5134 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5138 /* clean tx statistics */
5139 for_each_rx_queue(bp, i)
5140 bnx2x_fp(bp, i, tx_pkt) = 0;
5143 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5145 int func = BP_FUNC(bp);
5147 spin_lock_init(&bp->spq_lock);
5149 bp->spq_left = MAX_SPQ_PENDING;
5150 bp->spq_prod_idx = 0;
5151 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5152 bp->spq_prod_bd = bp->spq;
5153 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5155 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5156 U64_LO(bp->spq_mapping));
5157 REG_WR(bp,
5158 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5159 U64_HI(bp->spq_mapping));
5161 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5162 bp->spq_prod_idx);
5165 static void bnx2x_init_context(struct bnx2x *bp)
5169 for_each_rx_queue(bp, i) {
5170 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5171 struct bnx2x_fastpath *fp = &bp->fp[i];
5172 u8 cl_id = fp->cl_id;
5174 context->ustorm_st_context.common.sb_index_numbers =
5175 BNX2X_RX_SB_INDEX_NUM;
5176 context->ustorm_st_context.common.clientId = cl_id;
5177 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5178 context->ustorm_st_context.common.flags =
5179 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5180 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5181 context->ustorm_st_context.common.statistics_counter_id =
5182 cl_id;
5183 context->ustorm_st_context.common.mc_alignment_log_size =
5184 BNX2X_RX_ALIGN_SHIFT;
5185 context->ustorm_st_context.common.bd_buff_size =
5186 bp->rx_buf_size;
5187 context->ustorm_st_context.common.bd_page_base_hi =
5188 U64_HI(fp->rx_desc_mapping);
5189 context->ustorm_st_context.common.bd_page_base_lo =
5190 U64_LO(fp->rx_desc_mapping);
5191 if (!fp->disable_tpa) {
5192 context->ustorm_st_context.common.flags |=
5193 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5194 context->ustorm_st_context.common.sge_buff_size =
5195 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5196 (u32)0xffff);
5197 context->ustorm_st_context.common.sge_page_base_hi =
5198 U64_HI(fp->rx_sge_mapping);
5199 context->ustorm_st_context.common.sge_page_base_lo =
5200 U64_LO(fp->rx_sge_mapping);
5202 context->ustorm_st_context.common.max_sges_for_packet =
5203 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5204 context->ustorm_st_context.common.max_sges_for_packet =
5205 ((context->ustorm_st_context.common.
5206 max_sges_for_packet + PAGES_PER_SGE - 1) &
5207 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5210 context->ustorm_ag_context.cdu_usage =
5211 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5212 CDU_REGION_NUMBER_UCM_AG,
5213 ETH_CONNECTION_TYPE);
5215 context->xstorm_ag_context.cdu_reserved =
5216 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5217 CDU_REGION_NUMBER_XCM_AG,
5218 ETH_CONNECTION_TYPE);
5221 for_each_tx_queue(bp, i) {
5222 struct bnx2x_fastpath *fp = &bp->fp[i];
5223 struct eth_context *context =
5224 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5226 context->cstorm_st_context.sb_index_number =
5227 C_SB_ETH_TX_CQ_INDEX;
5228 context->cstorm_st_context.status_block_id = fp->sb_id;
5230 context->xstorm_st_context.tx_bd_page_base_hi =
5231 U64_HI(fp->tx_desc_mapping);
5232 context->xstorm_st_context.tx_bd_page_base_lo =
5233 U64_LO(fp->tx_desc_mapping);
5234 context->xstorm_st_context.statistics_data = (fp->cl_id |
5235 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5239 static void bnx2x_init_ind_table(struct bnx2x *bp)
5241 int func = BP_FUNC(bp);
5244 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5245 return;
5247 DP(NETIF_MSG_IFUP,
5248 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5249 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5250 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5251 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5252 bp->fp->cl_id + (i % bp->num_rx_queues));
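/*
 * The RSS indirection table holds TSTORM_INDIRECTION_TABLE_SIZE one-byte
 * entries, each naming a client (queue) id; the TSTORM hashes a flow and
 * uses the result modulo the table size as the index. The fill above
 * simply round-robins the Rx queues, e.g. with 4 Rx queues and a leading
 * cl_id of 0 the table reads 0,1,2,3,0,1,2,3,...
 */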
5255 static void bnx2x_set_client_config(struct bnx2x *bp)
5257 struct tstorm_eth_client_config tstorm_client = {0};
5258 int port = BP_PORT(bp);
5261 tstorm_client.mtu = bp->dev->mtu;
5262 tstorm_client.config_flags =
5263 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5264 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5265 #ifdef BCM_VLAN
5266 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5267 tstorm_client.config_flags |=
5268 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5269 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5270 }
5271 #endif
5273 for_each_queue(bp, i) {
5274 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5276 REG_WR(bp, BAR_TSTRORM_INTMEM +
5277 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5278 ((u32 *)&tstorm_client)[0]);
5279 REG_WR(bp, BAR_TSTRORM_INTMEM +
5280 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5281 ((u32 *)&tstorm_client)[1]);
5284 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5285 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5288 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5290 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5291 int mode = bp->rx_mode;
5292 int mask = bp->rx_mode_cl_mask;
5293 int func = BP_FUNC(bp);
5294 int port = BP_PORT(bp);
5296 /* All but management unicast packets should pass to the host as well */
5297 u32 llh_mask =
5298 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5299 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5300 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5301 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5303 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5305 switch (mode) {
5306 case BNX2X_RX_MODE_NONE: /* no Rx */
5307 tstorm_mac_filter.ucast_drop_all = mask;
5308 tstorm_mac_filter.mcast_drop_all = mask;
5309 tstorm_mac_filter.bcast_drop_all = mask;
5310 break;
5312 case BNX2X_RX_MODE_NORMAL:
5313 tstorm_mac_filter.bcast_accept_all = mask;
5314 break;
5316 case BNX2X_RX_MODE_ALLMULTI:
5317 tstorm_mac_filter.mcast_accept_all = mask;
5318 tstorm_mac_filter.bcast_accept_all = mask;
5319 break;
5321 case BNX2X_RX_MODE_PROMISC:
5322 tstorm_mac_filter.ucast_accept_all = mask;
5323 tstorm_mac_filter.mcast_accept_all = mask;
5324 tstorm_mac_filter.bcast_accept_all = mask;
5325 /* pass management unicast packets as well */
5326 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5327 break;
5329 default:
5330 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5331 break;
5332 }
5334 REG_WR(bp,
5335 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5336 llh_mask);
5338 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5339 REG_WR(bp, BAR_TSTRORM_INTMEM +
5340 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5341 ((u32 *)&tstorm_mac_filter)[i]);
5343 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5344 ((u32 *)&tstorm_mac_filter)[i]); */
5347 if (mode != BNX2X_RX_MODE_NONE)
5348 bnx2x_set_client_config(bp);
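/*
 * Two filters are programmed here: the per-client drop/accept masks in
 * TSTORM internal memory (what the fastpath delivers to the host) and
 * the NIG LLH mask (what the MAC lets through to the BRB at all).
 * "mask" is this function's per-client bit, so e.g. PROMISC sets
 * ucast/mcast/bcast_accept_all for this client and additionally lets
 * management-style unicast through the LLH.
 */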
5351 static void bnx2x_init_internal_common(struct bnx2x *bp)
5355 /* Zero this manually as its initialization is
5356 currently missing in the initTool */
5357 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5358 REG_WR(bp, BAR_USTRORM_INTMEM +
5359 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5362 static void bnx2x_init_internal_port(struct bnx2x *bp)
5364 int port = BP_PORT(bp);
5366 REG_WR(bp,
5367 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5368 REG_WR(bp,
5369 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5370 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5374 static void bnx2x_init_internal_func(struct bnx2x *bp)
5376 struct tstorm_eth_function_common_config tstorm_config = {0};
5377 struct stats_indication_flags stats_flags = {0};
5378 int port = BP_PORT(bp);
5379 int func = BP_FUNC(bp);
5380 int i, j;
5381 u32 offset;
5382 u16 max_agg_size;
5384 if (is_multi(bp)) {
5385 tstorm_config.config_flags = MULTI_FLAGS(bp);
5386 tstorm_config.rss_result_mask = MULTI_MASK;
5387 }
5389 /* Enable TPA if needed */
5390 if (bp->flags & TPA_ENABLE_FLAG)
5391 tstorm_config.config_flags |=
5392 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5394 if (IS_E1HMF(bp))
5395 tstorm_config.config_flags |=
5396 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5398 tstorm_config.leading_client_id = BP_L_ID(bp);
5400 REG_WR(bp, BAR_TSTRORM_INTMEM +
5401 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5402 (*(u32 *)&tstorm_config));
5404 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5405 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5406 bnx2x_set_storm_rx_mode(bp);
5408 for_each_queue(bp, i) {
5409 u8 cl_id = bp->fp[i].cl_id;
5411 /* reset xstorm per client statistics */
5412 offset = BAR_XSTRORM_INTMEM +
5413 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5414 for (j = 0;
5415 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5416 REG_WR(bp, offset + j*4, 0);
5418 /* reset tstorm per client statistics */
5419 offset = BAR_TSTRORM_INTMEM +
5420 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5421 for (j = 0;
5422 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5423 REG_WR(bp, offset + j*4, 0);
5425 /* reset ustorm per client statistics */
5426 offset = BAR_USTRORM_INTMEM +
5427 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5428 for (j = 0;
5429 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5430 REG_WR(bp, offset + j*4, 0);
5433 /* Init statistics related context */
5434 stats_flags.collect_eth = 1;
5436 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5437 ((u32 *)&stats_flags)[0]);
5438 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5439 ((u32 *)&stats_flags)[1]);
5441 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5442 ((u32 *)&stats_flags)[0]);
5443 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5444 ((u32 *)&stats_flags)[1]);
5446 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5447 ((u32 *)&stats_flags)[0]);
5448 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5449 ((u32 *)&stats_flags)[1]);
5451 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5452 ((u32 *)&stats_flags)[0]);
5453 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5454 ((u32 *)&stats_flags)[1]);
5456 REG_WR(bp, BAR_XSTRORM_INTMEM +
5457 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5458 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5459 REG_WR(bp, BAR_XSTRORM_INTMEM +
5460 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5461 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5463 REG_WR(bp, BAR_TSTRORM_INTMEM +
5464 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5465 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5466 REG_WR(bp, BAR_TSTRORM_INTMEM +
5467 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5468 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5470 REG_WR(bp, BAR_USTRORM_INTMEM +
5471 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5472 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5473 REG_WR(bp, BAR_USTRORM_INTMEM +
5474 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5475 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5477 if (CHIP_IS_E1H(bp)) {
5478 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5480 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5482 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5484 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5487 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5491 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5492 max_agg_size =
5493 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5494 SGE_PAGE_SIZE * PAGES_PER_SGE),
5495 (u32)0xffff);
5496 for_each_rx_queue(bp, i) {
5497 struct bnx2x_fastpath *fp = &bp->fp[i];
5499 REG_WR(bp, BAR_USTRORM_INTMEM +
5500 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5501 U64_LO(fp->rx_comp_mapping));
5502 REG_WR(bp, BAR_USTRORM_INTMEM +
5503 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5504 U64_HI(fp->rx_comp_mapping));
5507 REG_WR(bp, BAR_USTRORM_INTMEM +
5508 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5509 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5510 REG_WR(bp, BAR_USTRORM_INTMEM +
5511 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5512 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5514 REG_WR16(bp, BAR_USTRORM_INTMEM +
5515 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5516 max_agg_size);
5519 /* dropless flow control */
5520 if (CHIP_IS_E1H(bp)) {
5521 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5523 rx_pause.bd_thr_low = 250;
5524 rx_pause.cqe_thr_low = 250;
5526 rx_pause.sge_thr_low = 0;
5527 rx_pause.bd_thr_high = 350;
5528 rx_pause.cqe_thr_high = 350;
5529 rx_pause.sge_thr_high = 0;
5531 for_each_rx_queue(bp, i) {
5532 struct bnx2x_fastpath *fp = &bp->fp[i];
5534 if (!fp->disable_tpa) {
5535 rx_pause.sge_thr_low = 150;
5536 rx_pause.sge_thr_high = 250;
5540 offset = BAR_USTRORM_INTMEM +
5541 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5542 fp->cl_id);
5543 for (j = 0;
5544 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5545 j++)
5546 REG_WR(bp, offset + j*4,
5547 ((u32 *)&rx_pause)[j]);
5551 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5553 /* Init rate shaping and fairness contexts */
5554 if (IS_E1HMF(bp)) {
5555 int vn;
5557 /* During init there is no active link
5558 Until link is up, set link rate to 10Gbps */
5559 bp->link_vars.line_speed = SPEED_10000;
5560 bnx2x_init_port_minmax(bp);
5562 bnx2x_calc_vn_weight_sum(bp);
5564 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5565 bnx2x_init_vn_minmax(bp, 2*vn + port);
5567 /* Enable rate shaping and fairness */
5568 bp->cmng.flags.cmng_enables =
5569 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5570 if (bp->vn_weight_sum)
5571 bp->cmng.flags.cmng_enables |=
5572 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5573 else
5574 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5575 " fairness will be disabled\n");
5576 } else {
5577 /* rate shaping and fairness are disabled */
5578 DP(NETIF_MSG_IFUP,
5579 "single function mode minmax will be disabled\n");
5580 }
5583 /* Store it to internal memory */
5585 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5586 REG_WR(bp, BAR_XSTRORM_INTMEM +
5587 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5588 ((u32 *)(&bp->cmng))[i]);
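/*
 * Everything computed into bp->cmng (rate shaping and fairness for the
 * E1H virtual-NIC scheduler) only takes effect once it is copied, one
 * u32 at a time, into XSTORM per-port memory by the loop above; the
 * storm firmware reads it from there, not from host memory.
 */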
5591 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5593 switch (load_code) {
5594 case FW_MSG_CODE_DRV_LOAD_COMMON:
5595 bnx2x_init_internal_common(bp);
5596 /* no break */
5598 case FW_MSG_CODE_DRV_LOAD_PORT:
5599 bnx2x_init_internal_port(bp);
5600 /* no break */
5602 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5603 bnx2x_init_internal_func(bp);
5604 break;
5606 default:
5607 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5608 break;
5609 }
5612 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5616 for_each_queue(bp, i) {
5617 struct bnx2x_fastpath *fp = &bp->fp[i];
5619 fp->bp = bp;
5620 fp->state = BNX2X_FP_STATE_CLOSED;
5621 fp->index = i;
5622 fp->cl_id = BP_L_ID(bp) + i;
5623 #ifdef BCM_CNIC
5624 fp->sb_id = fp->cl_id + 1;
5625 #else
5626 fp->sb_id = fp->cl_id;
5627 #endif
5628 /* Suitable Rx and Tx SBs are served by the same client */
5629 if (i >= bp->num_rx_queues)
5630 fp->cl_id -= bp->num_rx_queues;
5632 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5633 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5634 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5636 bnx2x_update_fpsb_idx(fp);
5639 /* ensure status block indices were read */
5640 rmb();
5643 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5644 DEF_SB_ID);
5645 bnx2x_update_dsb_idx(bp);
5646 bnx2x_update_coalesce(bp);
5647 bnx2x_init_rx_rings(bp);
5648 bnx2x_init_tx_ring(bp);
5649 bnx2x_init_sp_ring(bp);
5650 bnx2x_init_context(bp);
5651 bnx2x_init_internal(bp, load_code);
5652 bnx2x_init_ind_table(bp);
5653 bnx2x_stats_init(bp);
5655 /* At this point, we are ready for interrupts */
5656 atomic_set(&bp->intr_sem, 0);
5658 /* flush all before enabling interrupts */
5659 mb();
5660 mmiowb();
5662 bnx2x_int_enable(bp);
5664 /* Check for SPIO5 */
5665 bnx2x_attn_int_deasserted0(bp,
5666 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5667 AEU_INPUTS_ATTN_BITS_SPIO5);
5670 /* end of nic init */
5672 /*
5673 * gzip service functions
5674 */
5676 static int bnx2x_gunzip_init(struct bnx2x *bp)
5678 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5679 &bp->gunzip_mapping);
5680 if (bp->gunzip_buf == NULL)
5681 goto gunzip_nomem1;
5683 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5684 if (bp->strm == NULL)
5685 goto gunzip_nomem2;
5687 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5688 GFP_KERNEL);
5689 if (bp->strm->workspace == NULL)
5690 goto gunzip_nomem3;
5692 return 0;
5694 gunzip_nomem3:
5695 kfree(bp->strm);
5696 bp->strm = NULL;
5698 gunzip_nomem2:
5699 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5700 bp->gunzip_mapping);
5701 bp->gunzip_buf = NULL;
5703 gunzip_nomem1:
5704 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5705 " un-compression\n", bp->dev->name);
5706 return -ENOMEM;
5709 static void bnx2x_gunzip_end(struct bnx2x *bp)
5711 kfree(bp->strm->workspace);
5713 kfree(bp->strm);
5714 bp->strm = NULL;
5716 if (bp->gunzip_buf) {
5717 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5718 bp->gunzip_mapping);
5719 bp->gunzip_buf = NULL;
5723 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5725 int n, rc;
5727 /* check gzip header */
5728 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5729 BNX2X_ERR("Bad gzip header\n");
5730 return -EINVAL;
5731 }
5733 n = 10;
5735 #define FNAME 0x8
5737 if (zbuf[3] & FNAME)
5738 while ((zbuf[n++] != 0) && (n < len));
5740 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5741 bp->strm->avail_in = len - n;
5742 bp->strm->next_out = bp->gunzip_buf;
5743 bp->strm->avail_out = FW_BUF_SIZE;
5745 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5746 if (rc != Z_OK)
5747 return rc;
5749 rc = zlib_inflate(bp->strm, Z_FINISH);
5750 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5751 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5752 bp->dev->name, bp->strm->msg);
5754 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5755 if (bp->gunzip_outlen & 0x3)
5756 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5757 " gunzip_outlen (%d) not aligned\n",
5758 bp->dev->name, bp->gunzip_outlen);
5759 bp->gunzip_outlen >>= 2;
5761 zlib_inflateEnd(bp->strm);
5763 if (rc == Z_STREAM_END)
5764 return 0;
5765 else
5766 return rc;
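/*
 * bnx2x_gunzip() accepts a standard gzip member: the 10-byte header must
 * carry the 0x1f 0x8b magic and the deflate method id, and if the FNAME
 * flag (bit 3 of the flags byte) is set, the NUL-terminated original
 * file name is skipped as well. zlib is then run with negative
 * windowBits, i.e. raw deflate with no zlib/gzip framing:
 *
 *	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
 *	rc = zlib_inflate(bp->strm, Z_FINISH);
 *
 * The output length is tracked in 32-bit words (gunzip_outlen >>= 2)
 * since the init code writes the decompressed image to the chip as
 * DWORDs.
 */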
5769 /* nic load/unload */
5771 /*
5772 * General service functions
5773 */
5775 /* send a NIG loopback debug packet */
5776 static void bnx2x_lb_pckt(struct bnx2x *bp)
5778 u32 wb_write[3];
5780 /* Ethernet source and destination addresses */
5781 wb_write[0] = 0x55555555;
5782 wb_write[1] = 0x55555555;
5783 wb_write[2] = 0x20; /* SOP */
5784 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5786 /* NON-IP protocol */
5787 wb_write[0] = 0x09000000;
5788 wb_write[1] = 0x55555555;
5789 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5790 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5793 /* Some of the internal memories are not directly readable from the
5794 * driver; to test them we send debug packets.
5795 */
5797 static int bnx2x_int_mem_test(struct bnx2x *bp)
5799 int factor;
5800 int count, i;
5801 u32 val = 0;
5803 if (CHIP_REV_IS_FPGA(bp))
5804 factor = 120;
5805 else if (CHIP_REV_IS_EMUL(bp))
5806 factor = 200;
5807 else
5808 factor = 1;
5810 DP(NETIF_MSG_HW, "start part1\n");
5812 /* Disable inputs of parser neighbor blocks */
5813 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5814 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5815 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5816 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5818 /* Write 0 to parser credits for CFC search request */
5819 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5821 /* send Ethernet packet */
5822 bnx2x_lb_pckt(bp);
5824 /* TODO: do I reset the NIG statistics? */
5825 /* Wait until NIG register shows 1 packet of size 0x10 */
5826 count = 1000 * factor;
5827 while (count) {
5829 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5830 val = *bnx2x_sp(bp, wb_data[0]);
5831 if (val == 0x10)
5832 break;
5834 msleep(10);
5835 count--;
5836 }
5837 if (val != 0x10) {
5838 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5839 return -1;
5840 }
5842 /* Wait until PRS register shows 1 packet */
5843 count = 1000 * factor;
5844 while (count) {
5845 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5846 if (val == 1)
5847 break;
5849 msleep(10);
5850 count--;
5851 }
5852 if (val != 0x1) {
5853 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5854 return -2;
5855 }
5857 /* Reset and init BRB, PRS */
5858 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5859 msleep(50);
5860 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5861 msleep(50);
5862 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5863 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5865 DP(NETIF_MSG_HW, "part2\n");
5867 /* Disable inputs of parser neighbor blocks */
5868 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5869 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5870 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5871 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5873 /* Write 0 to parser credits for CFC search request */
5874 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5876 /* send 10 Ethernet packets */
5877 for (i = 0; i < 10; i++)
5878 bnx2x_lb_pckt(bp);
5880 /* Wait until NIG register shows 10 + 1
5881 packets of size 11*0x10 = 0xb0 */
5882 count = 1000 * factor;
5883 while (count) {
5885 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5886 val = *bnx2x_sp(bp, wb_data[0]);
5887 if (val == 0xb0)
5888 break;
5890 msleep(10);
5891 count--;
5892 }
5893 if (val != 0xb0) {
5894 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5895 return -3;
5896 }
5898 /* Wait until PRS register shows 2 packets */
5899 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5900 if (val != 2)
5901 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5903 /* Write 1 to parser credits for CFC search request */
5904 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5906 /* Wait until PRS register shows 3 packets */
5907 msleep(10 * factor);
5908 /* Wait until NIG register shows 1 packet of size 0x10 */
5909 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5910 if (val != 3)
5911 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5913 /* clear NIG EOP FIFO */
5914 for (i = 0; i < 11; i++)
5915 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5916 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5917 if (val != 1) {
5918 BNX2X_ERR("clear of NIG failed\n");
5919 return -4;
5920 }
5922 /* Reset and init BRB, PRS, NIG */
5923 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5924 msleep(50);
5925 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5926 msleep(50);
5927 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5928 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5929 #ifndef BCM_CNIC
5930 /* set NIC mode */
5931 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5932 #endif
5934 /* Enable inputs of parser neighbor blocks */
5935 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5936 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5937 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5938 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5940 DP(NETIF_MSG_HW, "done\n");
5942 return 0;
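/*
 * Summary of the self test above: part 1 pushes a single 16-byte debug
 * packet through NIG -> BRB -> PRS with the parser's CFC search credit
 * held at zero and checks that both blocks counted it; part 2 queues
 * ten more packets, restores a single CFC search credit and checks that
 * the parser advances by one more packet, proving the otherwise
 * write-only internal memories are alive. Any mismatch fails
 * bnx2x_init_common() on an E1 first power-up.
 */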
5945 static void enable_blocks_attention(struct bnx2x *bp)
5947 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5948 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5949 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5950 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5951 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5952 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5953 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5954 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5955 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5956 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5957 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5958 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5959 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5960 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5961 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5962 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5963 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5964 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5965 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5966 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5967 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5968 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5969 if (CHIP_REV_IS_FPGA(bp))
5970 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5972 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5973 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5974 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5975 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5976 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5977 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5978 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5979 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5980 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5981 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5985 static void bnx2x_reset_common(struct bnx2x *bp)
5988 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5989 0xd3ffff7f);
5990 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5993 static void bnx2x_init_pxp(struct bnx2x *bp)
5995 u16 devctl;
5996 int r_order, w_order;
5998 pci_read_config_word(bp->pdev,
5999 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6000 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6001 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6002 if (bp->mrrs == -1)
6003 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6004 else {
6005 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6006 r_order = bp->mrrs;
6007 }
6009 bnx2x_init_pxp_arb(bp, r_order, w_order);
6012 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6014 int is_required, port;
6015 u32 val;
6017 is_required = 0;
6018 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6019 SHARED_HW_CFG_FAN_FAILURE_MASK;
6021 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6022 is_required = 1;
6024 /*
6025 * The fan failure mechanism is usually related to the PHY type since
6026 * the power consumption of the board is affected by the PHY. Currently,
6027 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6028 */
6029 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6030 for (port = PORT_0; port < PORT_MAX; port++) {
6031 u32 phy_type =
6032 SHMEM_RD(bp, dev_info.port_hw_config[port].
6033 external_phy_config) &
6034 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6035 is_required |=
6036 ((phy_type ==
6037 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6038 (phy_type ==
6039 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6040 (phy_type ==
6041 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6042 }
6044 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6046 if (is_required == 0)
6047 return;
6049 /* Fan failure is indicated by SPIO 5 */
6050 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6051 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6053 /* set to active low mode */
6054 val = REG_RD(bp, MISC_REG_SPIO_INT);
6055 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6056 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6057 REG_WR(bp, MISC_REG_SPIO_INT, val);
6059 /* enable interrupt to signal the IGU */
6060 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6061 val |= (1 << MISC_REGISTERS_SPIO_5);
6062 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
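/*
 * Fan failure, where required by the board/PHY combination, is wired to
 * SPIO5: the pin is switched to high-impedance input, its interrupt is
 * armed for the active-low edge (the "old value set" position), and the
 * SPIO event is routed to the IGU. The attention deassert path checks
 * the same line via AEU_INPUTS_ATTN_BITS_SPIO5 (see the SPIO5 check in
 * bnx2x_nic_init()).
 */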
6065 static int bnx2x_init_common(struct bnx2x *bp)
6072 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6074 bnx2x_reset_common(bp);
6075 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6076 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6078 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6079 if (CHIP_IS_E1H(bp))
6080 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6082 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6083 msleep(30);
6084 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6086 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6087 if (CHIP_IS_E1(bp)) {
6088 /* enable HW interrupt from PXP on USDM overflow
6089 bit 16 on INT_MASK_0 */
6090 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6093 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6095 #ifdef __BIG_ENDIAN
6097 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6098 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6099 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6100 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6101 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6102 /* make sure this value is 0 */
6103 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6105 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6106 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6107 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6108 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6109 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6110 #endif
6112 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6114 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6115 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6116 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6119 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6120 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6122 /* let the HW do its magic ... */
6123 msleep(100);
6124 /* finish PXP init */
6125 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6126 if (val != 1) {
6127 BNX2X_ERR("PXP2 CFG failed\n");
6128 return -EBUSY;
6129 }
6130 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6131 if (val != 1) {
6132 BNX2X_ERR("PXP2 RD_INIT failed\n");
6133 return -EBUSY;
6134 }
6136 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6137 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6139 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6141 /* clean the DMAE memory */
6142 bp->dmae_ready = 1;
6143 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6145 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6148 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6150 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6151 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6152 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6153 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6155 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6160 for (i = 0; i < 64; i++) {
6161 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6162 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6164 if (CHIP_IS_E1H(bp)) {
6165 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6166 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6171 /* soft reset pulse */
6172 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6173 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6176 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6179 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6180 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6181 if (!CHIP_REV_IS_SLOW(bp)) {
6182 /* enable hw interrupt from doorbell Q */
6183 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6186 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6187 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6188 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6189 #ifndef BCM_CNIC
6190 /* set NIC mode */
6191 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6192 #endif
6193 if (CHIP_IS_E1H(bp))
6194 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6196 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6197 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6198 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6199 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6201 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6202 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6203 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6204 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6206 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6207 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6208 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6209 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6211 /* sync semi rtc */
6212 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6213 0x80000000);
6214 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6215 0x80000000);
6217 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6218 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6219 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6221 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6222 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6223 REG_WR(bp, i, 0xc0cac01a);
6224 /* TODO: replace with something meaningful */
6226 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6227 #ifdef BCM_CNIC
6228 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6229 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6230 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6231 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6232 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6237 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6238 #endif
6239 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6241 if (sizeof(union cdu_context) != 1024)
6242 /* we currently assume that a context is 1024 bytes */
6243 printk(KERN_ALERT PFX "please adjust the size of"
6244 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6246 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6247 val = (4 << 24) + (0 << 12) + 1024;
6248 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6250 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6251 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6252 /* enable context validation interrupt from CFC */
6253 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6255 /* set the thresholds to prevent CFC/CDU race */
6256 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6258 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6259 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6261 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6262 /* Reset PCIE errors for debug */
6263 REG_WR(bp, 0x2814, 0xffffffff);
6264 REG_WR(bp, 0x3820, 0xffffffff);
6266 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6267 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6268 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6269 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6271 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6272 if (CHIP_IS_E1H(bp)) {
6273 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6274 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6277 if (CHIP_REV_IS_SLOW(bp))
6278 msleep(200);
6281 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6282 if (val != 1) {
6283 BNX2X_ERR("CFC LL_INIT failed\n");
6284 return -EBUSY;
6285 }
6286 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6287 if (val != 1) {
6288 BNX2X_ERR("CFC AC_INIT failed\n");
6289 return -EBUSY;
6290 }
6291 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6292 if (val != 1) {
6293 BNX2X_ERR("CFC CAM_INIT failed\n");
6294 return -EBUSY;
6295 }
6296 REG_WR(bp, CFC_REG_DEBUG0, 0);
6298 /* read NIG statistic
6299 to see if this is our first bring-up since power-up */
6300 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6301 val = *bnx2x_sp(bp, wb_data[0]);
6303 /* do internal memory self test */
6304 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6305 BNX2X_ERR("internal mem self test failed\n");
6309 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6310 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6312 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6313 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6314 bp->port.need_hw_lock = 1;
6321 bnx2x_setup_fan_failure_detection(bp);
6323 /* clear PXP2 attentions */
6324 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6326 enable_blocks_attention(bp);
6328 if (!BP_NOMCP(bp)) {
6329 bnx2x_acquire_phy_lock(bp);
6330 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6331 bnx2x_release_phy_lock(bp);
6333 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6338 static int bnx2x_init_port(struct bnx2x *bp)
6340 int port = BP_PORT(bp);
6341 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6345 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6347 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6349 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6350 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6352 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6353 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6354 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6355 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6358 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6360 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6361 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6362 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6364 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6366 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6367 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6368 /* no pause for emulation and FPGA */
6373 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6374 else if (bp->dev->mtu > 4096) {
6375 if (bp->flags & ONE_PORT_FLAG)
6379 /* (24*1024 + val*4)/256 */
6380 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6383 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6384 high = low + 56; /* 14*1024/256 */
6386 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6387 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
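/* A worked example of the BRB threshold arithmetic above, assuming (as
 * the /256 factors in the comments suggest) that the thresholds are in
 * 256-byte BRB blocks: a two-port, single-function device at standard
 * MTU gets low = 160 blocks (40KB) and high = 160 + 56 = 216 blocks
 * (54KB). For MTU 9000 the jumbo branch gives low = 96 + 9000/64 + 1 =
 * 237 blocks, which is (24*1024 + 9000*4)/256 rounded up.
 */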
6390 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6392 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6393 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6394 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6395 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6397 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6398 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6399 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6400 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6402 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6403 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6405 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6407 /* configure PBF to work without PAUSE for MTU 9000 */
6408 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6410 /* update threshold */
6411 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6412 /* update init credit */
6413 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6416 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6418 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6421 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6423 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6424 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6426 if (CHIP_IS_E1(bp)) {
6427 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6428 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6430 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6432 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6433 /* init aeu_mask_attn_func_0/1:
6434 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6435 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6436 * bits 4-7 are used for "per vn group attention" */
6437 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6438 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6440 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6441 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6442 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6443 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6444 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6446 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6448 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6450 if (CHIP_IS_E1H(bp)) {
6451 /* 0x2 disable e1hov, 0x1 enable */
6452 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6453 (IS_E1HMF(bp) ? 0x1 : 0x2));
6456 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6457 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6458 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6462 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6463 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6465 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6466 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6468 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6470 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6471 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6473 /* The GPIO should be swapped if the swap register is set and active */
6475 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6476 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6478 /* Select function upon port-swap configuration */
6480 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6481 aeu_gpio_mask = (swap_val && swap_override) ?
6482 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6483 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6485 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6486 aeu_gpio_mask = (swap_val && swap_override) ?
6487 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6488 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6490 val = REG_RD(bp, offset);
6491 /* add GPIO3 to group */
6492 val |= aeu_gpio_mask;
6493 REG_WR(bp, offset, val);
6497 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6498 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6499 /* add SPIO 5 to group 0 */
6501 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6502 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6503 val = REG_RD(bp, reg_addr);
6504 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6505 REG_WR(bp, reg_addr, val);
6513 bnx2x__link_reset(bp);
6518 #define ILT_PER_FUNC (768/2)
6519 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6520 /* the phys address is shifted right 12 bits and a valid bit (1) is
6521 added as the 53rd bit;
6522 then, since this is a wide register(TM),
6523 we split it into two 32-bit writes
6525 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6526 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6527 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6528 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
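/* A worked example of the address-split macros above: for a DMA address
 * of 0x0000001234567000, ONCHIP_ADDR1 yields (addr >> 12) & 0xFFFFFFFF =
 * 0x01234567 and ONCHIP_ADDR2 yields (1 << 20) | (addr >> 44) =
 * 0x00100000, i.e. the valid bit lands at bit 20 of the high word -
 * bit 52 of the combined value, the "53rd bit" when counting from 1.
 */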
6531 #define CNIC_ILT_LINES 127
6532 #define CNIC_CTX_PER_ILT 16
6534 #define CNIC_ILT_LINES 0
6537 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6541 if (CHIP_IS_E1H(bp))
6542 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6544 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6546 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6549 static int bnx2x_init_func(struct bnx2x *bp)
6551 int port = BP_PORT(bp);
6552 int func = BP_FUNC(bp);
6556 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6558 /* set MSI reconfigure capability */
6559 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6560 val = REG_RD(bp, addr);
6561 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6562 REG_WR(bp, addr, val);
6564 i = FUNC_ILT_BASE(func);
6566 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6567 if (CHIP_IS_E1H(bp)) {
6568 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6569 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6571 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6572 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6575 i += 1 + CNIC_ILT_LINES;
6576 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6578 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6580 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6581 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6585 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6587 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6589 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6590 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6594 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6596 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6598 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6599 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
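/* To summarize the ILT programming above: each function owns
 * ILT_PER_FUNC (768/2 = 384) lines starting at FUNC_ILT_BASE(func).
 * The first 1 + CNIC_ILT_LINES lines map the CDU context memory, and
 * the single lines that follow map the timers block, the QM queues and
 * the searcher T1 table respectively (on E1H via the RQ_*_FIRST/LAST_ILT
 * registers, on E1 via the equivalent PSWRQ L2P ranges).
 */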
6602 /* tell the searcher where the T2 table is */
6603 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6605 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6606 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6608 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6609 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6610 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6612 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
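/* The searcher is handed its T2 free list here: SRC_REG_COUNTFREE0 is
 * the element count (16*1024/64 = 256), FIRSTFREE0/LASTFREE0 take the
 * 64-bit physical addresses of the list head and tail that
 * bnx2x_alloc_mem() chained together, and NUMBER_HASH_BITS0 = 10
 * presumably selects 2^10 = 1024 hash buckets - an inference from the
 * register name, since this file does not define its semantics.
 */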
6615 if (CHIP_IS_E1H(bp)) {
6616 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6617 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6618 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6619 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6620 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6621 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6622 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6623 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6624 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6626 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6627 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6630 /* HC init per function */
6631 if (CHIP_IS_E1H(bp)) {
6632 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6634 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6635 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6637 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6639 /* Reset PCIE errors for debug */
6640 REG_WR(bp, 0x2114, 0xffffffff);
6641 REG_WR(bp, 0x2120, 0xffffffff);
6646 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6650 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6651 BP_FUNC(bp), load_code);
6654 mutex_init(&bp->dmae_mutex);
6655 rc = bnx2x_gunzip_init(bp);
6659 switch (load_code) {
6660 case FW_MSG_CODE_DRV_LOAD_COMMON:
6661 rc = bnx2x_init_common(bp);
6666 case FW_MSG_CODE_DRV_LOAD_PORT:
6668 rc = bnx2x_init_port(bp);
6673 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6675 rc = bnx2x_init_func(bp);
6681 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6685 if (!BP_NOMCP(bp)) {
6686 int func = BP_FUNC(bp);
6688 bp->fw_drv_pulse_wr_seq =
6689 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6690 DRV_PULSE_SEQ_MASK);
6691 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6694 /* this needs to be done before gunzip end */
6695 bnx2x_zero_def_sb(bp);
6696 for_each_queue(bp, i)
6697 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6699 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6703 bnx2x_gunzip_end(bp);
6708 static void bnx2x_free_mem(struct bnx2x *bp)
6711 #define BNX2X_PCI_FREE(x, y, size) \
6714 pci_free_consistent(bp->pdev, size, x, y); \
6720 #define BNX2X_FREE(x) \
6732 for_each_queue(bp, i) {
6735 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6736 bnx2x_fp(bp, i, status_blk_mapping),
6737 sizeof(struct host_status_block));
6740 for_each_rx_queue(bp, i) {
6742 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6743 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6744 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6745 bnx2x_fp(bp, i, rx_desc_mapping),
6746 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6749 bnx2x_fp(bp, i, rx_comp_mapping),
6750 sizeof(struct eth_fast_path_rx_cqe) *
6754 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6756 bnx2x_fp(bp, i, rx_sge_mapping),
6757 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6760 for_each_tx_queue(bp, i) {
6762 /* fastpath tx rings: tx_buf tx_desc */
6763 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6764 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6765 bnx2x_fp(bp, i, tx_desc_mapping),
6766 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6768 /* end of fastpath */
6770 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6771 sizeof(struct host_def_status_block));
6773 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6774 sizeof(struct bnx2x_slowpath));
6777 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6778 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6779 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6780 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6781 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6782 sizeof(struct host_status_block));
6784 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6786 #undef BNX2X_PCI_FREE
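/* A note on the two allocation helpers defined below: BNX2X_PCI_ALLOC
 * hands back zeroed DMA-coherent memory for structures the chip itself
 * reads and writes (status blocks, descriptor rings), while BNX2X_ALLOC
 * uses vmalloc for host-only shadow arrays such as the sw_rx_bd and
 * sw_tx_bd rings, which the hardware never touches.
 */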
6790 static int bnx2x_alloc_mem(struct bnx2x *bp)
6793 #define BNX2X_PCI_ALLOC(x, y, size) \
6795 x = pci_alloc_consistent(bp->pdev, size, y); \
6797 goto alloc_mem_err; \
6798 memset(x, 0, size); \
6801 #define BNX2X_ALLOC(x, size) \
6803 x = vmalloc(size); \
6805 goto alloc_mem_err; \
6806 memset(x, 0, size); \
6813 for_each_queue(bp, i) {
6814 bnx2x_fp(bp, i, bp) = bp;
6817 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6818 &bnx2x_fp(bp, i, status_blk_mapping),
6819 sizeof(struct host_status_block));
6822 for_each_rx_queue(bp, i) {
6824 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6825 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6826 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6827 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6828 &bnx2x_fp(bp, i, rx_desc_mapping),
6829 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6831 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6832 &bnx2x_fp(bp, i, rx_comp_mapping),
6833 sizeof(struct eth_fast_path_rx_cqe) *
6837 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6838 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6839 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6840 &bnx2x_fp(bp, i, rx_sge_mapping),
6841 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6844 for_each_tx_queue(bp, i) {
6846 /* fastpath tx rings: tx_buf tx_desc */
6847 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6848 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6850 &bnx2x_fp(bp, i, tx_desc_mapping),
6851 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6853 /* end of fastpath */
6855 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6856 sizeof(struct host_def_status_block));
6858 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6859 sizeof(struct bnx2x_slowpath));
6862 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6864 /* allocate searcher T2 table;
6865 we allocate 1/4 of alloc num for T2
6866 (which is not entered into the ILT) */
6867 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6869 /* Initialize T2 (for 1024 connections) */
6870 for (i = 0; i < 16*1024; i += 64)
6871 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
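/* The loop above chains the 256 64-byte T2 elements into a free list:
 * the last 8 bytes of each element (offset 56) hold the physical
 * address of the next one, so element k points at t2_mapping +
 * (k + 1)*64 and the final element points one past the end of the
 * table. bnx2x_init_func() later passes the head and tail of this list
 * to the searcher via SRC_REG_FIRSTFREE0 and SRC_REG_LASTFREE0.
 */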
6873 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6874 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6876 /* QM queues (128*MAX_CONN) */
6877 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6879 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6880 sizeof(struct host_status_block));
6883 /* Slow path ring */
6884 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6892 #undef BNX2X_PCI_ALLOC
6896 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6900 for_each_tx_queue(bp, i) {
6901 struct bnx2x_fastpath *fp = &bp->fp[i];
6903 u16 bd_cons = fp->tx_bd_cons;
6904 u16 sw_prod = fp->tx_pkt_prod;
6905 u16 sw_cons = fp->tx_pkt_cons;
6907 while (sw_cons != sw_prod) {
6908 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6914 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6918 for_each_rx_queue(bp, j) {
6919 struct bnx2x_fastpath *fp = &bp->fp[j];
6921 for (i = 0; i < NUM_RX_BD; i++) {
6922 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6923 struct sk_buff *skb = rx_buf->skb;
6928 pci_unmap_single(bp->pdev,
6929 pci_unmap_addr(rx_buf, mapping),
6930 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6935 if (!fp->disable_tpa)
6936 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6937 ETH_MAX_AGGREGATION_QUEUES_E1 :
6938 ETH_MAX_AGGREGATION_QUEUES_E1H);
6942 static void bnx2x_free_skbs(struct bnx2x *bp)
6944 bnx2x_free_tx_skbs(bp);
6945 bnx2x_free_rx_skbs(bp);
6948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6952 free_irq(bp->msix_table[0].vector, bp->dev);
6953 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6954 bp->msix_table[0].vector);
6959 for_each_queue(bp, i) {
6960 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6961 "state %x\n", i, bp->msix_table[i + offset].vector,
6962 bnx2x_fp(bp, i, state));
6964 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6968 static void bnx2x_free_irq(struct bnx2x *bp)
6970 if (bp->flags & USING_MSIX_FLAG) {
6971 bnx2x_free_msix_irqs(bp);
6972 pci_disable_msix(bp->pdev);
6973 bp->flags &= ~USING_MSIX_FLAG;
6975 } else if (bp->flags & USING_MSI_FLAG) {
6976 free_irq(bp->pdev->irq, bp->dev);
6977 pci_disable_msi(bp->pdev);
6978 bp->flags &= ~USING_MSI_FLAG;
6981 free_irq(bp->pdev->irq, bp->dev);
6984 static int bnx2x_enable_msix(struct bnx2x *bp)
6986 int i, rc, offset = 1;
6989 bp->msix_table[0].entry = igu_vec;
6990 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6993 igu_vec = BP_L_ID(bp) + offset;
6994 bp->msix_table[1].entry = igu_vec;
6995 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6998 for_each_queue(bp, i) {
6999 igu_vec = BP_L_ID(bp) + offset + i;
7000 bp->msix_table[i + offset].entry = igu_vec;
7001 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7002 "(fastpath #%u)\n", i + offset, igu_vec, i);
7005 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7006 BNX2X_NUM_QUEUES(bp) + offset);
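/* The table requested above thus holds one slowpath vector (entry 0),
 * one CNIC vector when that support is compiled in, and one vector per
 * fastpath queue, with IGU vector numbers allocated consecutively from
 * BP_L_ID(bp) + offset - hence the BNX2X_NUM_QUEUES(bp) + offset entry
 * count passed to pci_enable_msix().
 */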
7008 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7012 bp->flags |= USING_MSIX_FLAG;
7017 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7019 int i, rc, offset = 1;
7021 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7022 bp->dev->name, bp->dev);
7024 BNX2X_ERR("request sp irq failed\n");
7031 for_each_queue(bp, i) {
7032 struct bnx2x_fastpath *fp = &bp->fp[i];
7034 if (i < bp->num_rx_queues)
7035 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7037 sprintf(fp->name, "%s-tx-%d",
7038 bp->dev->name, i - bp->num_rx_queues);
7040 rc = request_irq(bp->msix_table[i + offset].vector,
7041 bnx2x_msix_fp_int, 0, fp->name, fp);
7043 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7044 bnx2x_free_msix_irqs(bp);
7048 fp->state = BNX2X_FP_STATE_IRQ;
7051 i = BNX2X_NUM_QUEUES(bp);
7052 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7054 bp->dev->name, bp->msix_table[0].vector,
7055 0, bp->msix_table[offset].vector,
7056 i - 1, bp->msix_table[offset + i - 1].vector);
7061 static int bnx2x_enable_msi(struct bnx2x *bp)
7065 rc = pci_enable_msi(bp->pdev);
7067 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7070 bp->flags |= USING_MSI_FLAG;
7075 static int bnx2x_req_irq(struct bnx2x *bp)
7077 unsigned long flags;
7080 if (bp->flags & USING_MSI_FLAG)
7083 flags = IRQF_SHARED;
7085 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7086 bp->dev->name, bp->dev);
7088 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7093 static void bnx2x_napi_enable(struct bnx2x *bp)
7097 for_each_rx_queue(bp, i)
7098 napi_enable(&bnx2x_fp(bp, i, napi));
7101 static void bnx2x_napi_disable(struct bnx2x *bp)
7105 for_each_rx_queue(bp, i)
7106 napi_disable(&bnx2x_fp(bp, i, napi));
7109 static void bnx2x_netif_start(struct bnx2x *bp)
7113 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7114 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7117 if (netif_running(bp->dev)) {
7118 bnx2x_napi_enable(bp);
7119 bnx2x_int_enable(bp);
7120 if (bp->state == BNX2X_STATE_OPEN)
7121 netif_tx_wake_all_queues(bp->dev);
7126 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7128 bnx2x_int_disable_sync(bp, disable_hw);
7129 bnx2x_napi_disable(bp);
7130 netif_tx_disable(bp->dev);
7131 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7135 * Init service functions
7139 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7141 * @param bp driver descriptor
7142 * @param set set or clear an entry (1 or 0)
7143 * @param mac pointer to a buffer containing a MAC
7144 * @param cl_bit_vec bit vector of clients to register a MAC for
7145 * @param cam_offset offset in a CAM to use
7146 * @param with_bcast set broadcast MAC as well
7148 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7149 u32 cl_bit_vec, u8 cam_offset,
7152 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7153 int port = BP_PORT(bp);
7156 * unicasts 0-31:port0 32-63:port1
7157 * multicast 64-127:port0 128-191:port1
7159 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7160 config->hdr.offset = cam_offset;
7161 config->hdr.client_id = 0xff;
7162 config->hdr.reserved1 = 0;
7165 config->config_table[0].cam_entry.msb_mac_addr =
7166 swab16(*(u16 *)&mac[0]);
7167 config->config_table[0].cam_entry.middle_mac_addr =
7168 swab16(*(u16 *)&mac[2]);
7169 config->config_table[0].cam_entry.lsb_mac_addr =
7170 swab16(*(u16 *)&mac[4]);
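/* The swab16() calls above turn each aligned pair of MAC bytes into
 * the big-endian 16-bit halves the CAM expects. A worked example,
 * assuming a little-endian host: for MAC 00:11:22:33:44:55,
 * *(u16 *)&mac[0] reads as 0x1100 and swab16() yields
 * msb_mac_addr = 0x0011.
 */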
7171 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7173 config->config_table[0].target_table_entry.flags = 0;
7175 CAM_INVALIDATE(config->config_table[0]);
7176 config->config_table[0].target_table_entry.clients_bit_vector =
7177 cpu_to_le32(cl_bit_vec);
7178 config->config_table[0].target_table_entry.vlan_id = 0;
7180 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7181 (set ? "setting" : "clearing"),
7182 config->config_table[0].cam_entry.msb_mac_addr,
7183 config->config_table[0].cam_entry.middle_mac_addr,
7184 config->config_table[0].cam_entry.lsb_mac_addr);
7188 config->config_table[1].cam_entry.msb_mac_addr =
7189 cpu_to_le16(0xffff);
7190 config->config_table[1].cam_entry.middle_mac_addr =
7191 cpu_to_le16(0xffff);
7192 config->config_table[1].cam_entry.lsb_mac_addr =
7193 cpu_to_le16(0xffff);
7194 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7196 config->config_table[1].target_table_entry.flags =
7197 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7199 CAM_INVALIDATE(config->config_table[1]);
7200 config->config_table[1].target_table_entry.clients_bit_vector =
7201 cpu_to_le32(cl_bit_vec);
7202 config->config_table[1].target_table_entry.vlan_id = 0;
7205 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7206 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7207 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7211 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7213 * @param bp driver descriptor
7214 * @param set set or clear an entry (1 or 0)
7215 * @param mac pointer to a buffer containing a MAC
7216 * @param cl_bit_vec bit vector of clients to register a MAC for
7217 * @param cam_offset offset in a CAM to use
7219 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7220 u32 cl_bit_vec, u8 cam_offset)
7222 struct mac_configuration_cmd_e1h *config =
7223 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7225 config->hdr.length = 1;
7226 config->hdr.offset = cam_offset;
7227 config->hdr.client_id = 0xff;
7228 config->hdr.reserved1 = 0;
7231 config->config_table[0].msb_mac_addr =
7232 swab16(*(u16 *)&mac[0]);
7233 config->config_table[0].middle_mac_addr =
7234 swab16(*(u16 *)&mac[2]);
7235 config->config_table[0].lsb_mac_addr =
7236 swab16(*(u16 *)&mac[4]);
7237 config->config_table[0].clients_bit_vector =
7238 cpu_to_le32(cl_bit_vec);
7239 config->config_table[0].vlan_id = 0;
7240 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7242 config->config_table[0].flags = BP_PORT(bp);
7244 config->config_table[0].flags =
7245 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7247 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7248 (set ? "setting" : "clearing"),
7249 config->config_table[0].msb_mac_addr,
7250 config->config_table[0].middle_mac_addr,
7251 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7253 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7254 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7255 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7258 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7259 int *state_p, int poll)
7261 /* can take a while if any port is running */
7264 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7265 poll ? "polling" : "waiting", state, idx);
7270 bnx2x_rx_int(bp->fp, 10);
7271 /* if index is different from 0
7272 * the reply for some commands will
7273 * be on the non default queue
7276 bnx2x_rx_int(&bp->fp[idx], 10);
7279 mb(); /* state is changed by bnx2x_sp_event() */
7280 if (*state_p == state) {
7281 #ifdef BNX2X_STOP_ON_ERROR
7282 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7294 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7295 poll ? "polling" : "waiting", state, idx);
7296 #ifdef BNX2X_STOP_ON_ERROR
7303 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7305 bp->set_mac_pending++;
7308 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7309 (1 << bp->fp->cl_id), BP_FUNC(bp));
7311 /* Wait for a completion */
7312 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7315 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7317 bp->set_mac_pending++;
7320 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7321 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7324 /* Wait for a completion */
7325 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7330 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7331 * MAC(s). This function will wait until the ramrod completion
7334 * @param bp driver handle
7335 * @param set set or clear the CAM entry
7337 * @return 0 on success, -ENODEV if the ramrod doesn't return.
7339 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7341 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7343 bp->set_mac_pending++;
7346 /* Send a SET_MAC ramrod */
7348 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7349 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7352 /* CAM allocation for E1H
7353 * unicasts: by func number
7354 * multicast: 20+FUNC*20, 20 each
7356 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7357 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7359 /* Wait for a completion when setting */
7360 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7366 static int bnx2x_setup_leading(struct bnx2x *bp)
7370 /* reset IGU state */
7371 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7374 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7376 /* Wait for completion */
7377 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7382 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7384 struct bnx2x_fastpath *fp = &bp->fp[index];
7386 /* reset IGU state */
7387 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7390 fp->state = BNX2X_FP_STATE_OPENING;
7391 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7394 /* Wait for completion */
7395 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7399 static int bnx2x_poll(struct napi_struct *napi, int budget);
7401 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7402 int *num_tx_queues_out)
7404 int _num_rx_queues = 0, _num_tx_queues = 0;
7406 switch (bp->multi_mode) {
7407 case ETH_RSS_MODE_DISABLED:
7412 case ETH_RSS_MODE_REGULAR:
7414 _num_rx_queues = min_t(u32, num_rx_queues,
7415 BNX2X_MAX_QUEUES(bp));
7417 _num_rx_queues = min_t(u32, num_online_cpus(),
7418 BNX2X_MAX_QUEUES(bp));
7421 _num_tx_queues = min_t(u32, num_tx_queues,
7422 BNX2X_MAX_QUEUES(bp));
7424 _num_tx_queues = min_t(u32, num_online_cpus(),
7425 BNX2X_MAX_QUEUES(bp));
7427 /* There must not be more Tx queues than Rx queues */
7428 if (_num_tx_queues > _num_rx_queues) {
7429 BNX2X_ERR("number of tx queues (%d) > "
7430 "number of rx queues (%d)"
7431 " defaulting to %d\n",
7432 _num_tx_queues, _num_rx_queues,
7434 _num_tx_queues = _num_rx_queues;
7445 *num_rx_queues_out = _num_rx_queues;
7446 *num_tx_queues_out = _num_tx_queues;
7449 static int bnx2x_set_int_mode(struct bnx2x *bp)
7456 bp->num_rx_queues = 1;
7457 bp->num_tx_queues = 1;
7458 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7463 /* Set interrupt mode according to bp->multi_mode value */
7464 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7465 &bp->num_tx_queues);
7467 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7468 bp->num_rx_queues, bp->num_tx_queues);
7470 /* if we can't use MSI-X we only need one fp,
7471 * so try to enable MSI-X with the requested number of fp's
7472 * and fall back to MSI or legacy INTx with one fp
7474 rc = bnx2x_enable_msix(bp);
7476 /* failed to enable MSI-X */
7478 BNX2X_ERR("Multi requested but failed to "
7479 "enable MSI-X (rx %d tx %d), "
7480 "set number of queues to 1\n",
7481 bp->num_rx_queues, bp->num_tx_queues);
7482 bp->num_rx_queues = 1;
7483 bp->num_tx_queues = 1;
7487 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7492 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7493 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7496 /* must be called with rtnl_lock */
7497 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7502 #ifdef BNX2X_STOP_ON_ERROR
7503 if (unlikely(bp->panic))
7507 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7509 rc = bnx2x_set_int_mode(bp);
7511 if (bnx2x_alloc_mem(bp))
7514 for_each_rx_queue(bp, i)
7515 bnx2x_fp(bp, i, disable_tpa) =
7516 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7518 for_each_rx_queue(bp, i)
7519 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7522 bnx2x_napi_enable(bp);
7524 if (bp->flags & USING_MSIX_FLAG) {
7525 rc = bnx2x_req_msix_irqs(bp);
7527 pci_disable_msix(bp->pdev);
7531 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7532 memory (in bnx2x_set_int_mode()) */
7533 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7534 bnx2x_enable_msi(bp);
7536 rc = bnx2x_req_irq(bp);
7538 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7539 if (bp->flags & USING_MSI_FLAG)
7540 pci_disable_msi(bp->pdev);
7543 if (bp->flags & USING_MSI_FLAG) {
7544 bp->dev->irq = bp->pdev->irq;
7545 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7546 bp->dev->name, bp->pdev->irq);
7550 /* Send LOAD_REQUEST command to MCP.
7551 Returns the type of LOAD command:
7552 if this is the first port to be initialized,
7553 common blocks should be initialized, otherwise - not
7555 if (!BP_NOMCP(bp)) {
7556 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7558 BNX2X_ERR("MCP response failure, aborting\n");
7562 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7563 rc = -EBUSY; /* other port in diagnostic mode */
7568 int port = BP_PORT(bp);
7570 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7571 load_count[0], load_count[1], load_count[2]);
7573 load_count[1 + port]++;
7574 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7575 load_count[0], load_count[1], load_count[2]);
7576 if (load_count[0] == 1)
7577 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7578 else if (load_count[1 + port] == 1)
7579 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7581 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
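/* A worked example of the NO-MCP bookkeeping above (assuming
 * load_count[0] is bumped together with the per-port counter, as the
 * DP prints imply): the first load on port 0 takes the counters from
 * {0, 0, 0} to {1, 1, 0} and yields LOAD_COMMON; a later load on
 * port 1 gives {2, 1, 1} and LOAD_PORT; any further load on an
 * already-counted port gets LOAD_FUNCTION.
 */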
7584 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7585 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7589 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7592 rc = bnx2x_init_hw(bp, load_code);
7594 BNX2X_ERR("HW init failed, aborting\n");
7598 /* Setup NIC internals and enable interrupts */
7599 bnx2x_nic_init(bp, load_code);
7601 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7602 (bp->common.shmem2_base))
7603 SHMEM2_WR(bp, dcc_support,
7604 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7605 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7607 /* Send LOAD_DONE command to MCP */
7608 if (!BP_NOMCP(bp)) {
7609 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7611 BNX2X_ERR("MCP response failure, aborting\n");
7617 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7619 rc = bnx2x_setup_leading(bp);
7621 BNX2X_ERR("Setup leading failed!\n");
7622 #ifndef BNX2X_STOP_ON_ERROR
7630 if (CHIP_IS_E1H(bp))
7631 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7632 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7633 bp->state = BNX2X_STATE_DISABLED;
7636 if (bp->state == BNX2X_STATE_OPEN) {
7638 /* Enable Timer scan */
7639 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7641 for_each_nondefault_queue(bp, i) {
7642 rc = bnx2x_setup_multi(bp, i);
7652 bnx2x_set_eth_mac_addr_e1(bp, 1);
7654 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7656 /* Set iSCSI L2 MAC */
7657 mutex_lock(&bp->cnic_mutex);
7658 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7659 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7660 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7662 mutex_unlock(&bp->cnic_mutex);
7667 bnx2x_initial_phy_init(bp, load_mode);
7669 /* Start fast path */
7670 switch (load_mode) {
7672 if (bp->state == BNX2X_STATE_OPEN) {
7673 /* only the Tx queues need to be re-enabled */
7674 netif_tx_wake_all_queues(bp->dev);
7676 /* Initialize the receive filter. */
7677 bnx2x_set_rx_mode(bp->dev);
7681 netif_tx_start_all_queues(bp->dev);
7682 if (bp->state != BNX2X_STATE_OPEN)
7683 netif_tx_disable(bp->dev);
7684 /* Initialize the receive filter. */
7685 bnx2x_set_rx_mode(bp->dev);
7689 /* Initialize the receive filter. */
7690 bnx2x_set_rx_mode(bp->dev);
7691 bp->state = BNX2X_STATE_DIAG;
7699 bnx2x__link_status_update(bp);
7701 /* start the timer */
7702 mod_timer(&bp->timer, jiffies + bp->current_interval);
7705 bnx2x_setup_cnic_irq_info(bp);
7706 if (bp->state == BNX2X_STATE_OPEN)
7707 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7714 /* Disable Timer scan */
7715 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7718 bnx2x_int_disable_sync(bp, 1);
7719 if (!BP_NOMCP(bp)) {
7720 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7721 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7724 /* Free SKBs, SGEs, TPA pool and driver internals */
7725 bnx2x_free_skbs(bp);
7726 for_each_rx_queue(bp, i)
7727 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7732 bnx2x_napi_disable(bp);
7733 for_each_rx_queue(bp, i)
7734 netif_napi_del(&bnx2x_fp(bp, i, napi));
7740 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7742 struct bnx2x_fastpath *fp = &bp->fp[index];
7745 /* halt the connection */
7746 fp->state = BNX2X_FP_STATE_HALTING;
7747 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7749 /* Wait for completion */
7750 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7752 if (rc) /* timeout */
7755 /* delete cfc entry */
7756 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7758 /* Wait for completion */
7759 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7764 static int bnx2x_stop_leading(struct bnx2x *bp)
7766 __le16 dsb_sp_prod_idx;
7767 /* if the other port is handling traffic,
7768 this can take a lot of time */
7774 /* Send HALT ramrod */
7775 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7776 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7778 /* Wait for completion */
7779 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7780 &(bp->fp[0].state), 1);
7781 if (rc) /* timeout */
7784 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7786 /* Send PORT_DELETE ramrod */
7787 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7789 /* Wait for completion to arrive on default status block;
7790 we are going to reset the chip anyway,
7791 so there is not much to do if this times out
7793 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7795 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7796 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7797 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7798 #ifdef BNX2X_STOP_ON_ERROR
7806 rmb(); /* Refresh the dsb_sp_prod */
7808 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7809 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7814 static void bnx2x_reset_func(struct bnx2x *bp)
7816 int port = BP_PORT(bp);
7817 int func = BP_FUNC(bp);
7821 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7822 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7825 /* Disable Timer scan */
7826 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7828 * Wait for at least 10ms and up to 2 seconds for the timers scan to complete
7831 for (i = 0; i < 200; i++) {
7833 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7838 base = FUNC_ILT_BASE(func);
7839 for (i = base; i < base + ILT_PER_FUNC; i++)
7840 bnx2x_ilt_wr(bp, i, 0);
7843 static void bnx2x_reset_port(struct bnx2x *bp)
7845 int port = BP_PORT(bp);
7848 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7850 /* Do not rcv packets to BRB */
7851 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7852 /* Do not direct rcv packets that are not for MCP to the BRB */
7853 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7854 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7857 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7860 /* Check for BRB port occupancy */
7861 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7863 DP(NETIF_MSG_IFDOWN,
7864 "BRB1 is not empty %d blocks are occupied\n", val);
7866 /* TODO: Close Doorbell port? */
7869 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7871 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7872 BP_FUNC(bp), reset_code);
7874 switch (reset_code) {
7875 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7876 bnx2x_reset_port(bp);
7877 bnx2x_reset_func(bp);
7878 bnx2x_reset_common(bp);
7881 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7882 bnx2x_reset_port(bp);
7883 bnx2x_reset_func(bp);
7886 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7887 bnx2x_reset_func(bp);
7891 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7896 /* must be called with rtnl_lock */
7897 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7899 int port = BP_PORT(bp);
7904 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7906 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7908 /* Set "drop all" */
7909 bp->rx_mode = BNX2X_RX_MODE_NONE;
7910 bnx2x_set_storm_rx_mode(bp);
7912 /* Disable HW interrupts, NAPI and Tx */
7913 bnx2x_netif_stop(bp, 1);
7915 del_timer_sync(&bp->timer);
7916 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7917 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7918 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7923 /* Wait until tx fastpath tasks complete */
7924 for_each_tx_queue(bp, i) {
7925 struct bnx2x_fastpath *fp = &bp->fp[i];
7928 while (bnx2x_has_tx_work_unload(fp)) {
7932 BNX2X_ERR("timeout waiting for queue[%d]\n",
7934 #ifdef BNX2X_STOP_ON_ERROR
7945 /* Give HW time to discard old tx messages */
7948 if (CHIP_IS_E1(bp)) {
7949 struct mac_configuration_cmd *config =
7950 bnx2x_sp(bp, mcast_config);
7952 bnx2x_set_eth_mac_addr_e1(bp, 0);
7954 for (i = 0; i < config->hdr.length; i++)
7955 CAM_INVALIDATE(config->config_table[i]);
7957 config->hdr.length = i;
7958 if (CHIP_REV_IS_SLOW(bp))
7959 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7961 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7962 config->hdr.client_id = bp->fp->cl_id;
7963 config->hdr.reserved1 = 0;
7965 bp->set_mac_pending++;
7968 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7969 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7970 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7973 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7975 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7977 for (i = 0; i < MC_HASH_SIZE; i++)
7978 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7980 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7983 /* Clear iSCSI L2 MAC */
7984 mutex_lock(&bp->cnic_mutex);
7985 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7986 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7987 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7989 mutex_unlock(&bp->cnic_mutex);
7992 if (unload_mode == UNLOAD_NORMAL)
7993 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7995 else if (bp->flags & NO_WOL_FLAG)
7996 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7999 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8000 u8 *mac_addr = bp->dev->dev_addr;
8002 /* The mac address is written to entries 1-4 to
8003 preserve entry 0 which is used by the PMF */
8004 u8 entry = (BP_E1HVN(bp) + 1)*8;
8006 val = (mac_addr[0] << 8) | mac_addr[1];
8007 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8009 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8010 (mac_addr[4] << 8) | mac_addr[5];
8011 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8013 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
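/* A layout sketch for the WoL programming above: each
 * EMAC_REG_EMAC_MAC_MATCH entry is a pair of 32-bit registers
 * (8 bytes), so entry = (BP_E1HVN(bp) + 1)*8 places VNs 0..3 at match
 * entries 1..4, keeping entry 0 for the PMF. The first register takes
 * the top two MAC bytes, the second the remaining four.
 */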
8016 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8018 /* Close multi and leading connections
8019 Completions for ramrods are collected in a synchronous way */
8020 for_each_nondefault_queue(bp, i)
8021 if (bnx2x_stop_multi(bp, i))
8024 rc = bnx2x_stop_leading(bp);
8026 BNX2X_ERR("Stop leading failed!\n");
8027 #ifdef BNX2X_STOP_ON_ERROR
8036 reset_code = bnx2x_fw_command(bp, reset_code);
8038 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8039 load_count[0], load_count[1], load_count[2]);
8041 load_count[1 + port]--;
8042 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8043 load_count[0], load_count[1], load_count[2]);
8044 if (load_count[0] == 0)
8045 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8046 else if (load_count[1 + port] == 0)
8047 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8049 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8052 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8053 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8054 bnx2x__link_reset(bp);
8056 /* Reset the chip */
8057 bnx2x_reset_chip(bp, reset_code);
8059 /* Report UNLOAD_DONE to MCP */
8061 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8065 /* Free SKBs, SGEs, TPA pool and driver internals */
8066 bnx2x_free_skbs(bp);
8067 for_each_rx_queue(bp, i)
8068 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8069 for_each_rx_queue(bp, i)
8070 netif_napi_del(&bnx2x_fp(bp, i, napi));
8073 bp->state = BNX2X_STATE_CLOSED;
8075 netif_carrier_off(bp->dev);
8080 static void bnx2x_reset_task(struct work_struct *work)
8082 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8084 #ifdef BNX2X_STOP_ON_ERROR
8085 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8086 " so reset not done to allow debug dump,\n"
8087 " you will need to reboot when done\n");
8093 if (!netif_running(bp->dev))
8094 goto reset_task_exit;
8096 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8097 bnx2x_nic_load(bp, LOAD_NORMAL);
8103 /* end of nic load/unload */
8108 * Init service functions
8111 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8114 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8115 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8116 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8117 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8118 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8119 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8120 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8121 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8123 BNX2X_ERR("Unsupported function index: %d\n", func);
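/* The PGL "pretend" registers returned above let the driver temporarily
 * masquerade as another PCI function: writing a function number into
 * PXP2_REG_PGL_PRETEND_FUNC_Fn makes subsequent GRC accesses by
 * function n appear to come from the written function.
 * bnx2x_undi_int_disable_e1h() below uses this to disable interrupts
 * as function 0 and then restores the original value.
 */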
8128 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8130 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8132 /* Flush all outstanding writes */
8135 /* Pretend to be function 0 */
8137 /* Flush the GRC transaction (in the chip) */
8138 new_val = REG_RD(bp, reg);
8140 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8145 /* From now we are in the "like-E1" mode */
8146 bnx2x_int_disable(bp);
8148 /* Flush all outstanding writes */
8151 /* Restore the original function settings */
8152 REG_WR(bp, reg, orig_func);
8153 new_val = REG_RD(bp, reg);
8154 if (new_val != orig_func) {
8155 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8156 orig_func, new_val);
8161 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8163 if (CHIP_IS_E1H(bp))
8164 bnx2x_undi_int_disable_e1h(bp, func);
8166 bnx2x_int_disable(bp);
8169 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8173 /* Check if there is any driver already loaded */
8174 val = REG_RD(bp, MISC_REG_UNPREPARED);
8176 /* Check if it is the UNDI driver
8177 * UNDI driver initializes CID offset for normal bell to 0x7
8179 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8180 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8182 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8184 int func = BP_FUNC(bp);
8188 /* clear the UNDI indication */
8189 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8191 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8193 /* try unload UNDI on port 0 */
8196 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8197 DRV_MSG_SEQ_NUMBER_MASK);
8198 reset_code = bnx2x_fw_command(bp, reset_code);
8200 /* if UNDI is loaded on the other port */
8201 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8203 /* send "DONE" for previous unload */
8204 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8206 /* unload UNDI on port 1 */
8209 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8210 DRV_MSG_SEQ_NUMBER_MASK);
8211 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8213 bnx2x_fw_command(bp, reset_code);
8216 /* now it's safe to release the lock */
8217 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8219 bnx2x_undi_int_disable(bp, func);
8221 /* close input traffic and wait for it */
8222 /* Do not rcv packets to BRB */
8224 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8225 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8226 /* Do not direct rcv packets that are not for MCP to the BRB */
8229 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8230 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8233 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8234 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8237 /* save NIG port swap info */
8238 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8239 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8242 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8245 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8247 /* take the NIG out of reset and restore swap values */
8249 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8250 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8251 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8252 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8254 /* send unload done to the MCP */
8255 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8257 /* restore our func and fw_seq */
8260 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8261 DRV_MSG_SEQ_NUMBER_MASK);
8264 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8268 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8270 u32 val, val2, val3, val4, id;
8273 /* Get the chip revision id and number. */
8274 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8275 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8276 id = ((val & 0xffff) << 16);
8277 val = REG_RD(bp, MISC_REG_CHIP_REV);
8278 id |= ((val & 0xf) << 12);
8279 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8280 id |= ((val & 0xff) << 4);
8281 val = REG_RD(bp, MISC_REG_BOND_ID);
8283 bp->common.chip_id = id;
8284 bp->link_params.chip_id = bp->common.chip_id;
8285 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8287 val = (REG_RD(bp, 0x2874) & 0x55);
8288 if ((bp->common.chip_id & 0x1) ||
8289 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8290 bp->flags |= ONE_PORT_FLAG;
8291 BNX2X_DEV_INFO("single port device\n");
8294 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8295 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8296 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8297 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8298 bp->common.flash_size, bp->common.flash_size);
8300 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8301 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8302 bp->link_params.shmem_base = bp->common.shmem_base;
8303 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8304 bp->common.shmem_base, bp->common.shmem2_base);
8306 if (!bp->common.shmem_base ||
8307 (bp->common.shmem_base < 0xA0000) ||
8308 (bp->common.shmem_base >= 0xC0000)) {
8309 BNX2X_DEV_INFO("MCP not active\n");
8310 bp->flags |= NO_MCP_FLAG;
8314 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8315 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8316 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8317 BNX2X_ERR("BAD MCP validity signature\n");
8319 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8320 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8322 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8323 SHARED_HW_CFG_LED_MODE_MASK) >>
8324 SHARED_HW_CFG_LED_MODE_SHIFT);
8326 bp->link_params.feature_config_flags = 0;
8327 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8328 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8329 bp->link_params.feature_config_flags |=
8330 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8332 bp->link_params.feature_config_flags &=
8333 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8335 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8336 bp->common.bc_ver = val;
8337 BNX2X_DEV_INFO("bc_ver %X\n", val);
8338 if (val < BNX2X_BC_VER) {
8339 /* for now only warn
8340 * later we might need to enforce this */
8341 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8342 " please upgrade BC\n", BNX2X_BC_VER, val);
8344 bp->link_params.feature_config_flags |=
8345 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8346 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8348 if (BP_E1HVN(bp) == 0) {
8349 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8350 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8352 /* no WOL capability for E1HVN != 0 */
8353 bp->flags |= NO_WOL_FLAG;
8355 BNX2X_DEV_INFO("%sWoL capable\n",
8356 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8358 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8359 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8360 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8361 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8363 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8364 val, val2, val3, val4);
8367 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8370 int port = BP_PORT(bp);
8373 switch (switch_cfg) {
8375 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8378 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8379 switch (ext_phy_type) {
8380 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8381 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8384 bp->port.supported |= (SUPPORTED_10baseT_Half |
8385 SUPPORTED_10baseT_Full |
8386 SUPPORTED_100baseT_Half |
8387 SUPPORTED_100baseT_Full |
8388 SUPPORTED_1000baseT_Full |
8389 SUPPORTED_2500baseX_Full |
8394 SUPPORTED_Asym_Pause);
8397 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8398 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8401 bp->port.supported |= (SUPPORTED_10baseT_Half |
8402 SUPPORTED_10baseT_Full |
8403 SUPPORTED_100baseT_Half |
8404 SUPPORTED_100baseT_Full |
8405 SUPPORTED_1000baseT_Full |
8410 SUPPORTED_Asym_Pause);
8414 BNX2X_ERR("NVRAM config error. "
8415 "BAD SerDes ext_phy_config 0x%x\n",
8416 bp->link_params.ext_phy_config);
8420 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8422 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8425 case SWITCH_CFG_10G:
8426 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8429 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8430 switch (ext_phy_type) {
8431 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8432 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8435 bp->port.supported |= (SUPPORTED_10baseT_Half |
8436 SUPPORTED_10baseT_Full |
8437 SUPPORTED_100baseT_Half |
8438 SUPPORTED_100baseT_Full |
8439 SUPPORTED_1000baseT_Full |
8440 SUPPORTED_2500baseX_Full |
8441 SUPPORTED_10000baseT_Full |
8446 SUPPORTED_Asym_Pause);
8449 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8450 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8453 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8454 SUPPORTED_1000baseT_Full |
8458 SUPPORTED_Asym_Pause);
8461 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8462 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8465 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8466 SUPPORTED_2500baseX_Full |
8467 SUPPORTED_1000baseT_Full |
8471 SUPPORTED_Asym_Pause);
8474 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8475 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8478 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8481 SUPPORTED_Asym_Pause);
8484 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8485 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8488 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8489 SUPPORTED_1000baseT_Full |
8492 SUPPORTED_Asym_Pause);
8495 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8496 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8499 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8500 SUPPORTED_1000baseT_Full |
8504 SUPPORTED_Asym_Pause);
8507 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8508 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8511 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8512 SUPPORTED_1000baseT_Full |
8516 SUPPORTED_Asym_Pause);
8519 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8520 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8523 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8527 SUPPORTED_Asym_Pause);
8530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8531 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8534 bp->port.supported |= (SUPPORTED_10baseT_Half |
8535 SUPPORTED_10baseT_Full |
8536 SUPPORTED_100baseT_Half |
8537 SUPPORTED_100baseT_Full |
8538 SUPPORTED_1000baseT_Full |
8539 SUPPORTED_10000baseT_Full |
8543 SUPPORTED_Asym_Pause);
8546 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8547 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8548 bp->link_params.ext_phy_config);
8552 BNX2X_ERR("NVRAM config error. "
8553 "BAD XGXS ext_phy_config 0x%x\n",
8554 bp->link_params.ext_phy_config);
8558 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8560 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8565 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8566 bp->port.link_config);
8569 bp->link_params.phy_addr = bp->port.phy_addr;
8571 /* mask what we support according to speed_cap_mask */
8572 if (!(bp->link_params.speed_cap_mask &
8573 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8574 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8576 if (!(bp->link_params.speed_cap_mask &
8577 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8578 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8580 if (!(bp->link_params.speed_cap_mask &
8581 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8582 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8584 if (!(bp->link_params.speed_cap_mask &
8585 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8586 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8588 if (!(bp->link_params.speed_cap_mask &
8589 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8590 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8591 SUPPORTED_1000baseT_Full);
8593 if (!(bp->link_params.speed_cap_mask &
8594 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8595 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8597 if (!(bp->link_params.speed_cap_mask &
8598 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8599 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8601 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8604 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8606 bp->link_params.req_duplex = DUPLEX_FULL;
	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}
8754 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8755 PORT_FEATURE_FLOW_CONTROL_MASK);
8756 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8757 !(bp->port.supported & SUPPORTED_Autoneg))
8758 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8760 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8761 " advertising 0x%x\n",
8762 bp->link_params.req_line_speed,
8763 bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
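/* Editor's note (illustrative values): shmem stores a MAC address as a
 * 16-bit upper half and a 32-bit lower half.  For example mac_hi = 0x0010
 * and mac_lo = 0x18010203 produce 00:10:18:01:02:03 in mac_buf, because
 * both halves are converted to big-endian (network) order first. */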
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
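/* Editor's note: reads the per-port hardware configuration (lane and
 * external PHY config, speed capability mask, link_config, XGXS lane
 * settings, WoL default and the port MAC address) from shared memory. */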
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;
8786 bp->link_params.lane_config =
8787 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
8791 /* BCM8727_NOC => BCM8727 no over current */
8792 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8793 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8794 bp->link_params.ext_phy_config &=
8795 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8796 bp->link_params.ext_phy_config |=
8797 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8798 bp->link_params.feature_config_flags |=
					FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
8806 bp->port.link_config =
8807 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
8825 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8826 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8827 (config & PORT_FEATURE_WOL_ENABLED));
8829 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8830 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8831 bp->link_params.lane_config,
8832 bp->link_params.ext_phy_config,
8833 bp->link_params.speed_cap_mask, bp->port.link_config);
8835 bp->link_params.switch_cfg |= (bp->port.link_config &
8836 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8837 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8839 bnx2x_link_settings_requested(bp);
	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
8845 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8846 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8847 bp->mdio.prtad = bp->link_params.phy_addr;
	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8854 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8855 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8856 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8857 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8858 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
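/* Editor's note: gathers common and per-function HW info; on E1H in
 * multi-function mode the MAC address and outer-VLAN (E1HOV) tag come
 * from the per-function mf_cfg area rather than the port config. */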
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);
	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}
	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
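/* Editor's note: one-time software init of the bp structure -- locks,
 * deferred work, HW info, module-parameter derived settings and the
 * periodic timer; interrupts stay masked via intr_sem until HW init. */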
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;
8955 /* Disable interrupt handling until HW is initialized */
8956 atomic_set(&bp->intr_sem, 1);
8957 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8959 mutex_init(&bp->port.phy_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif
8964 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8965 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);
8973 if (CHIP_REV_IS_FPGA(bp))
8974 printk(KERN_ERR PFX "FPGA detected\n");
	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");
8980 /* Set multi queue mode */
8981 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8982 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8984 "Multi disabled since int_mode requested is not MSI-X\n");
8985 multi_mode = ETH_RSS_MODE_DISABLED;
8987 bp->multi_mode = multi_mode;
8992 bp->flags &= ~TPA_ENABLE_FLAG;
8993 bp->dev->features &= ~NETIF_F_LRO;
8995 bp->flags |= TPA_ENABLE_FLAG;
8996 bp->dev->features |= NETIF_F_LRO;
9000 bp->dropless_fc = 0;
9002 bp->dropless_fc = dropless_fc;
9006 bp->tx_ring_size = MAX_TX_AVAIL;
9007 bp->rx_ring_size = MAX_RX_AVAIL;
9014 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9015 bp->current_interval = (poll ? poll : timer_interval);
9017 init_timer(&bp->timer);
9018 bp->timer.expires = jiffies + bp->current_interval;
9019 bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
9035 cmd->supported = bp->port.supported;
9036 cmd->advertising = bp->port.advertising;
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9058 switch (ext_phy_type) {
9059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9062 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9063 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9064 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;
9087 cmd->phy_address = bp->mdio.prtad;
9088 cmd->transceiver = XCVR_INTERNAL;
	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;
9117 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9118 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9119 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9120 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9121 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9122 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9123 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}
9247 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9248 DP_LEVEL " req_duplex %d advertising 0x%x\n",
9249 bp->link_params.req_line_speed, bp->link_params.req_duplex,
9250 bp->port.advertising);
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9260 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9261 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
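/* Editor's note: each entry of reg_addrs[]/wreg_addrs_*[] carries an
 * "info" bitmask telling on which chips (E1/E1H) the register exists;
 * the dump length and dump loops below skip offline registers. */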
static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;
9269 if (CHIP_IS_E1(bp)) {
9270 for (i = 0; i < REGS_COUNT; i++)
9271 if (IS_E1_ONLINE(reg_addrs[i].info))
9272 regdump_len += reg_addrs[i].size;
9274 for (i = 0; i < WREGS_COUNT_E1; i++)
9275 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9276 regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
9281 if (IS_E1H_ONLINE(reg_addrs[i].info))
9282 regdump_len += reg_addrs[i].size;
9284 for (i = 0; i < WREGS_COUNT_E1H; i++)
9285 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9286 regdump_len += wreg_addrs_e1h[i].size *
9287 (1 + wreg_addrs_e1h[i].read_regs_count);
	}

	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;
9308 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9309 dump_hdr.dump_sign = dump_sign_all;
9310 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9311 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9312 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9313 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9314 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9316 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9317 p += dump_hdr.hdr_size + 1;
9319 if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9335 #define PHY_FW_VER_LEN 10
9337 static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];
9343 strcpy(info->driver, DRV_MODULE_NAME);
9344 strcpy(info->version, DRV_MODULE_VERSION);
	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}
9355 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9356 (bp->common.bc_ver & 0xff0000) >> 16,
9357 (bp->common.bc_ver & 0xff00) >> 8,
9358 (bp->common.bc_ver & 0xff),
9359 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9360 strcpy(info->bus_info, pci_name(bp->pdev));
9361 info->n_stats = BNX2X_NUM_STATS;
9362 info->testinfo_len = BNX2X_NUM_TESTS;
9363 info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
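/* Editor's note: NVRAM access is arbitrated per port through the
 * MCPR_NVM_SW_ARB register -- the lock is requested with a REQ_SET1
 * write and confirmed by polling the matching ARB1 status bit. */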
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9541 /* need to clear DONE bit separately */
9542 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9544 /* address of the NVRAM to read from */
9545 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9546 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9548 /* issue a read command */
9549 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9551 /* adjust timeout for emulation/FPGA */
9552 count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
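/* Editor's note (illustrative values): if the NVM data register returns
 * val = 0x669955aa on a little-endian CPU, cpu_to_be32() stores the byte
 * sequence 66 99 55 aa into *ret_val, i.e. the same order in which the
 * bytes sit in the flash and in which ethtool expects them. */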
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}
9591 if (offset + buf_size > bp->common.flash_size) {
9592 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9593 " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;
9603 /* enable access to nvram interface */
9604 bnx2x_enable_nvram_access(bp);
9606 /* read the first word(s) */
9607 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9608 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9609 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9610 memcpy(ret_buf, &val, 4);
9612 /* advance to the next dword */
9613 offset += sizeof(u32);
9614 ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;
9641 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9642 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9643 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9644 eeprom->len, eeprom->len);
9646 /* parameters already validated in ethtool_get_eeprom */
	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9661 /* need to clear DONE bit separately */
9662 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9664 /* write the data */
9665 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9667 /* address of the NVRAM to write to */
9668 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9669 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9671 /* issue the write command */
9672 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9674 /* adjust timeout for emulation/FPGA */
9675 count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9693 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
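/* Editor's note (worked example): BYTE_OFFSET(0x103) = 8 * (0x103 & 3)
 * = 24, the shift used below to mask and insert the single byte within
 * the dword read from the aligned offset 0x100. */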
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;
9715 /* enable access to nvram interface */
9716 bnx2x_enable_nvram_access(bp);
9718 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9719 align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
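/* Editor's note: the dword loop below flags a command LAST when it is
 * the final dword of the buffer or ends an NVRAM page, and FIRST again
 * at the next page boundary, so every page is programmed as its own
 * FIRST..LAST command sequence. */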
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9752 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9754 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9759 if (offset + buf_size > bp->common.flash_size) {
9760 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9761 " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
9776 while ((written_so_far < buf_size) && (rc == 0)) {
9777 if (written_so_far == (buf_size - sizeof(u32)))
9778 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9779 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9780 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9781 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9782 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9784 memcpy(&val, data_buf, 4);
9786 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9788 /* advance to the next dword */
9789 offset += sizeof(u32);
9790 data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;
9812 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9813 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9814 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9815 eeprom->len, eeprom->len);
9817 /* parameters already validated in ethtool_set_eeprom */
	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
9825 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9826 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9828 bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
9831 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9832 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9833 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9834 MISC_REGISTERS_GPIO_HIGH, port);
9835 bnx2x_release_phy_lock(bp);
9836 bnx2x_link_report(bp);
9838 } else if (eeprom->magic == 0x50485952) {
9839 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9840 if ((bp->state == BNX2X_STATE_OPEN) ||
9841 (bp->state == BNX2X_STATE_DISABLED)) {
9842 bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}

	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x50485943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9858 /* DSP Remove Download Mode */
9859 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9860 MISC_REGISTERS_GPIO_LOW, port);
9862 bnx2x_acquire_phy_lock(bp);
9864 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);
9897 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9898 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9899 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9901 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9902 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9903 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9905 if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
9916 ering->rx_max_pending = MAX_RX_AVAIL;
9917 ering->rx_mini_max_pending = 0;
9918 ering->rx_jumbo_max_pending = 0;
9920 ering->rx_pending = bp->rx_ring_size;
9921 ering->rx_mini_pending = 0;
9922 ering->rx_jumbo_pending = 0;
9924 ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;
9939 bp->rx_ring_size = ering->rx_pending;
9940 bp->tx_ring_size = ering->tx_pending;
9942 if (netif_running(dev)) {
9943 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);
9955 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9956 BNX2X_FLOW_CTRL_AUTO) &&
9957 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9959 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9960 BNX2X_FLOW_CTRL_RX);
9961 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9962 BNX2X_FLOW_CTRL_TX);
9964 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9965 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;
9977 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9978 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9979 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9981 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9983 if (epause->rx_pause)
9984 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9986 if (epause->tx_pause)
9987 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9989 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9990 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9992 if (epause->autoneg) {
9993 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9994 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9998 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9999 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10003 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10005 if (netif_running(dev)) {
10006 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10007 bnx2x_link_set(bp);
10013 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10015 struct bnx2x *bp = netdev_priv(dev);
10019 /* TPA requires Rx CSUM offloading */
10020 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10021 if (!(dev->features & NETIF_F_LRO)) {
10022 dev->features |= NETIF_F_LRO;
10023 bp->flags |= TPA_ENABLE_FLAG;
10027 } else if (dev->features & NETIF_F_LRO) {
10028 dev->features &= ~NETIF_F_LRO;
10029 bp->flags &= ~TPA_ENABLE_FLAG;
10033 if (changed && netif_running(dev)) {
10034 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
10079 static const struct {
10080 char string[ETH_GSTRING_LEN];
10081 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10082 { "register_test (offline)" },
10083 { "memory_test (offline)" },
10084 { "loopback_test (offline)" },
10085 { "nvram_test (online)" },
10086 { "interrupt_test (online)" },
10087 { "link_test (online)" },
10088 { "idle check (online)" }
10091 static int bnx2x_test_registers(struct bnx2x *bp)
10093 int idx, i, rc = -ENODEV;
10095 int port = BP_PORT(bp);
10096 static const struct {
10101 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10102 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10103 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10104 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10105 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10106 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10107 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10108 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10109 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10110 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10111 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10112 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10113 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10114 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10115 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10116 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10117 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10118 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
10119 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10120 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10121 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10122 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10123 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10124 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10125 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10126 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10127 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10128 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10129 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10130 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10131 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10132 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10133 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10134 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10135 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10136 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10137 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}
10158 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10159 u32 offset, mask, save_val, val;
10161 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10162 mask = reg_tbl[i].mask;
10164 save_val = REG_RD(bp, offset);
10166 REG_WR(bp, offset, wr_val);
10167 val = REG_RD(bp, offset);
10169 /* Restore the original register's value */
10170 REG_WR(bp, offset, save_val);
			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
10192 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10193 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10194 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10195 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10196 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10197 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
10208 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10209 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10210 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10211 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10212 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10213 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;
10221 /* Go through all the memories */
10222 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10223 for (j = 0; j < mem_tbl[i].size; j++)
10224 REG_RD(bp, mem_tbl[i].offset + j*4);
10226 /* Check the parity status */
10227 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10228 val = REG_RD(bp, prty_tbl[i].offset);
10229 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10230 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10232 "%s is 0x%x\n", prty_tbl[i].name, val);
10233 goto test_mem_exit;
10243 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10248 while (bnx2x_link_test(bp) && cnt--)
10252 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10254 unsigned int pkt_size, num_pkts, i;
10255 struct sk_buff *skb;
10256 unsigned char *packet;
10257 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10258 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10259 u16 tx_start_idx, tx_idx;
10260 u16 rx_start_idx, rx_idx;
10261 u16 pkt_prod, bd_prod;
10262 struct sw_tx_bd *tx_buf;
10263 struct eth_tx_start_bd *tx_start_bd;
10264 struct eth_tx_parse_bd *pbd = NULL;
10265 dma_addr_t mapping;
10266 union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}
10286 /* prepare the loopback packet */
10287 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10288 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
10294 packet = skb_put(skb, pkt_size);
10295 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10296 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10297 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10298 for (i = ETH_HLEN; i < pkt_size; i++)
10299 packet[i] = (unsigned char) (i & 0xff);
	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;
10312 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10313 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10314 mapping = pci_map_single(bp->pdev, skb->data,
10315 skb_headlen(skb), PCI_DMA_TODEVICE);
10316 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10317 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10318 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10319 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10320 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10321 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10322 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10323 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10325 /* turn on parsing and get a BD */
10326 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10327 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);
10345 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10346 if (tx_idx != tx_start_idx + num_pkts)
10347 goto test_loopback_exit;
10349 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10350 if (rx_idx != rx_start_idx + num_pkts)
10351 goto test_loopback_exit;
10353 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10354 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10355 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10356 goto test_loopback_rx_exit;
10358 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10359 if (len != pkt_size)
10360 goto test_loopback_rx_exit;
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;
10371 test_loopback_rx_exit:
10373 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10374 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10375 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10376 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10378 /* Update producers */
10379 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10380 fp_rx->rx_sge_prod);
10382 test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
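/* Editor's note: runs both loopback modes with the interface quiesced;
 * a non-zero return is a bitmask of BNX2X_PHY_LOOPBACK_FAILED and
 * BNX2X_MAC_LOOPBACK_FAILED. */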
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
10416 #define CRC32_RESIDUAL 0xdebb20e3
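/* Editor's note: each nvram_tbl region below is stored with its own
 * little-endian CRC32 appended; running ether_crc_le() over the data
 * including that CRC yields the fixed CRC-32 residue 0xdebb20e3 when
 * the region is intact. */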
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
10425 { 0x14, 0xec }, /* dir */
10426 { 0x100, 0x350 }, /* manuf_info */
10427 { 0x450, 0xf0 }, /* feature_info */
10428 { 0x640, 0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}
10452 for (i = 0; nvram_tbl[i].size; i++) {
		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;
10483 config->hdr.length = 0;
10484 if (CHIP_IS_E1(bp))
10485 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10487 config->hdr.offset = BP_FUNC(bp);
10488 config->hdr.client_id = bp->fp->cl_id;
10489 config->hdr.reserved1 = 0;
10491 bp->set_mac_pending++;
10493 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10494 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10495 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10510 static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;
10529 /* save current value of input enable for TX port IF */
10530 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10531 /* disable input for TX port IF */
10532 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10534 link_up = bp->link_vars.link_up;
10535 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10536 bnx2x_nic_load(bp, LOAD_DIAG);
10537 /* wait until link state is restored */
10538 bnx2x_wait_for_link(bp, link_up);
		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;
10552 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10554 /* restore input for TX port IF */
10555 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10557 bnx2x_nic_load(bp, LOAD_NORMAL);
10558 /* wait until link state is restored */
10559 bnx2x_wait_for_link(bp, link_up);
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10585 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10586 { Q_STATS_OFFSET32(error_bytes_received_hi),
10587 8, "[%d]: rx_error_bytes" },
10588 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10589 8, "[%d]: rx_ucast_packets" },
10590 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10591 8, "[%d]: rx_mcast_packets" },
10592 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10593 8, "[%d]: rx_bcast_packets" },
10594 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10595 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10596 4, "[%d]: rx_phy_ip_err_discards"},
10597 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10598 4, "[%d]: rx_skb_alloc_discard" },
10599 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10601 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10602 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10603 8, "[%d]: tx_packets" }
10606 static const struct {
10610 #define STATS_FLAGS_PORT 1
10611 #define STATS_FLAGS_FUNC 2
10612 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10613 u8 string[ETH_GSTRING_LEN];
10614 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10615 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10616 8, STATS_FLAGS_BOTH, "rx_bytes" },
10617 { STATS_OFFSET32(error_bytes_received_hi),
10618 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10619 { STATS_OFFSET32(total_unicast_packets_received_hi),
10620 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10621 { STATS_OFFSET32(total_multicast_packets_received_hi),
10622 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10623 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10624 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10625 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10626 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10627 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10628 8, STATS_FLAGS_PORT, "rx_align_errors" },
10629 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10630 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10631 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10632 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10633 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10634 8, STATS_FLAGS_PORT, "rx_fragments" },
10635 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10636 8, STATS_FLAGS_PORT, "rx_jabbers" },
10637 { STATS_OFFSET32(no_buff_discard_hi),
10638 8, STATS_FLAGS_BOTH, "rx_discards" },
10639 { STATS_OFFSET32(mac_filter_discard),
10640 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10641 { STATS_OFFSET32(xxoverflow_discard),
10642 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10643 { STATS_OFFSET32(brb_drop_hi),
10644 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10645 { STATS_OFFSET32(brb_truncate_hi),
10646 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10647 { STATS_OFFSET32(pause_frames_received_hi),
10648 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10649 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10650 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10651 { STATS_OFFSET32(nig_timer_max),
10652 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10653 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10654 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10655 { STATS_OFFSET32(rx_skb_alloc_failed),
10656 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10657 { STATS_OFFSET32(hw_csum_err),
10658 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10660 { STATS_OFFSET32(total_bytes_transmitted_hi),
10661 8, STATS_FLAGS_BOTH, "tx_bytes" },
10662 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10663 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10664 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10665 8, STATS_FLAGS_BOTH, "tx_packets" },
10666 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10667 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10668 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10669 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10670 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10671 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10672 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10673 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10674 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10675 8, STATS_FLAGS_PORT, "tx_deferred" },
10676 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10677 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10678 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10679 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10680 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10681 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10682 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10683 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10684 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10685 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10686 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10687 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10688 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10689 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10690 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10691 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10692 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10693 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10694 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10695 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10696 { STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10700 #define IS_PORT_STAT(i) \
10701 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10702 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10703 #define IS_E1HMF_MODE_STAT(bp) \
10704 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch(stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
10746 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10747 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10748 bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
10783 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10863 bp->link_params.hw_led_mode,
10864 bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10867 bp->link_params.hw_led_mode,
10868 bp->link_params.chip_id);
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
10875 if (bp->link_vars.link_up)
10876 bnx2x_set_led(bp, port, LED_MODE_OPER,
10877 bp->link_vars.line_speed,
10878 bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */
/****************************************************************************
* General service functions
****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
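
/* Illustrative sketch (not part of the driver): the PM control/status word
 * encodes the power state in its low bits; D0 entry clears the field and
 * D3hot entry sets it to 3, as done above. A stand-alone model of that bit
 * manipulation, assuming the standard PCI_PM_CTRL layout (hypothetical
 * helper):
 */
static inline u16 example_pmcsr_set_state(u16 pmcsr, bool d3hot)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* clear the D-state field */
	if (d3hot)
		pmcsr |= 3;			/* D3hot encoding */
	return pmcsr;
}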
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
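
/* Illustrative sketch (not part of the driver): the comment above describes
 * the classic "check for work, then re-check after a read barrier before
 * re-enabling the interrupt source" pattern. A stand-alone outline with
 * has_work()/enable_irq_source() as hypothetical stand-ins:
 */
#if 0
	if (!has_work()) {
		napi_complete(napi);
		/* rmb() guarantees the index read inside has_work() cannot
		 * be reordered past this point, so we never acknowledge a
		 * stale index to the interrupt controller */
		enable_irq_source();
	}
#endif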
/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
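
/* Illustrative sketch (not part of the driver): bnx2x_csum_fix() adjusts a
 * ones-complement checksum when the hardware summed `fix` bytes too many
 * (fix > 0, bytes before the transport header) or too few (fix < 0). A
 * hypothetical call for a packet whose hardware sum started 2 bytes before
 * the transport header:
 *
 *	pbd->tcp_pseudo_csum =
 *		bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb), 2);
 *
 * i.e. the partial sum of those 2 extra bytes is subtracted back out, then
 * folded, complemented and byte-swapped into the form the parsing BD wants.
 */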
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in the non-LSO case, a too-fragmented packet
			   should always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
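
/* Illustrative sketch (not part of the driver): the window check above
 * enforces the FW rule that any MAX_FETCH_BD - 3 consecutive BDs of an LSO
 * packet must together carry at least one MSS of payload. A stand-alone
 * model over a plain array of fragment sizes (hypothetical helper, not a
 * driver API):
 */
static inline bool example_needs_linearize(const unsigned int *frag_sz,
					   int nr_frags, int wnd_size,
					   unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	/* sum of the first window */
	for (i = 0; i < wnd_size && i < nr_frags; i++)
		wnd_sum += frag_sz[i];

	/* slide the window one fragment at a time */
	for (i = 0; i <= nr_frags - wnd_size; i++) {
		if (wnd_sum < mss)
			return true;	/* window too small -> linearize */
		wnd_sum -= frag_sz[i];
		if (i + wnd_size < nr_frags)
			wnd_sum += frag_sz[i + wnd_size];
	}
	return false;
}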
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes that packets always have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
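
/* Illustrative sketch (not part of the driver): the barrier dance above is
 * the generic "publish descriptors, then ring the producer" pattern. A
 * stand-alone outline with hypothetical ring/doorbell names:
 */
#if 0
	ring->desc[prod] = new_desc;	  /* 1. fill the BD in host memory */
	wmb();				  /* 2. make it globally visible   */
	writel(prod + 1, ring->doorbell); /* 3. ... before the device sees
					   *    the new producer and DMAs   */
#endif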
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
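
/* Illustrative sketch (not part of the driver): on E1H the multicast filter
 * above hashes each MAC into one of MC_HASH_SIZE 32-bit registers using the
 * top byte of a CRC32C. A stand-alone model of the bit selection
 * (hypothetical helper):
 */
static inline void example_mc_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* top CRC byte: 0..255 */

	/* 256 hash bits = 8 registers x 32 bits each */
	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}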
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
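
/* Illustrative note (not part of the driver): both results are plain
 * mask-and-shift extractions from the link control/status word. For a
 * hypothetical register value whose width field holds 8 and whose speed
 * field holds 2, this reports a x8 Gen2 link, which bnx2x_init_one() below
 * prints as "PCI-E x8 5GHz (Gen2)".
 */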
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
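
/* Illustrative note (not part of the driver): for a hypothetical raw
 * big-endian first word 0x12abcdef, the unpacking above yields
 * op = 0x12 (top 8 bits) and offset = 0xabcdef (low 24 bits); the second
 * word is byte-swapped wholesale into raw_data.
 */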
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		printk(KERN_ERR PFX "Failed to allocate %d bytes " \
		       "for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
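
/* Illustrative sketch (not part of the driver): the consumer advance above
 * is the usual ring-buffer wrap idiom. Stand-alone, with hypothetical
 * cons/ring_base/ring_last names:
 */
#if 0
	if (cons == ring_last)
		cons = ring_base;	/* wrap back to the first entry */
	else
		cons++;
#endif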
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
#endif /* BCM_CNIC */