1/* bnx2x.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Eliezer Tamir <eliezert@broadcom.com>
10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov
13 * Statistics and Link management by Yitchak Gertner
14 *
15 */
16
17/* define this to make the driver freeze on error
18 * to allow getting debug info
19 * (you will need to reboot afterwards)
20 */
21/*#define BNX2X_STOP_ON_ERROR*/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/kernel.h>
26#include <linux/device.h> /* for dev_info() */
27#include <linux/timer.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32#include <linux/interrupt.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/dma-mapping.h>
39#include <linux/bitops.h>
40#include <linux/irq.h>
41#include <linux/delay.h>
42#include <asm/byteorder.h>
43#include <linux/time.h>
44#include <linux/ethtool.h>
45#include <linux/mii.h>
46#ifdef NETIF_F_HW_VLAN_TX
47 #include <linux/if_vlan.h>
48 #define BCM_VLAN 1
49#endif
50#include <net/ip.h>
51#include <net/tcp.h>
52#include <net/checksum.h>
53#include <linux/workqueue.h>
54#include <linux/crc32.h>
55#include <linux/prefetch.h>
56#include <linux/zlib.h>
57#include <linux/version.h>
58#include <linux/io.h>
59
60#include "bnx2x_reg.h"
61#include "bnx2x_fw_defs.h"
62#include "bnx2x_hsi.h"
63#include "bnx2x.h"
64#include "bnx2x_init.h"
65
66#define DRV_MODULE_VERSION "0.40.15"
67#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $"
68#define BNX2X_BC_VER 0x040009
69
70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ)
72
73static char version[] __devinitdata =
74	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
81MODULE_INFO(cvs_version, "$Revision: #356 $");
82
83static int use_inta;
84static int poll;
85static int onefunc;
86static int nomcp;
87static int debug;
88static int use_multi;
89
90module_param(use_inta, int, 0);
91module_param(poll, int, 0);
92module_param(onefunc, int, 0);
93module_param(debug, int, 0);
94MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95MODULE_PARM_DESC(poll, "use polling (for debug)");
96MODULE_PARM_DESC(onefunc, "enable only first function");
97MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98MODULE_PARM_DESC(debug, "default debug msglevel");
99
100#ifdef BNX2X_MULTI
101module_param(use_multi, int, 0);
102MODULE_PARM_DESC(use_multi, "use per-CPU queues");
103#endif
104
105enum bnx2x_board_type {
106 BCM57710 = 0,
107};
108
109/* indexed by board_t, above */
110static struct {
111 char *name;
112} board_info[] __devinitdata = {
113 { "Broadcom NetXtreme II BCM57710 XGb" }
114};
115
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { 0 }
120};
121
122MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
123
124/****************************************************************************
125* General service functions
126****************************************************************************/
127
128/* used only at init
129 * locking is done by mcp
130 */
131static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
132{
133 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
134 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
136 PCICFG_VENDOR_ID_OFFSET);
137}
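/* The write above goes through the PCI config-space GRC window:
 * PCICFG_GRC_ADDRESS selects the target register, PCICFG_GRC_DATA
 * carries the value, and the final write parks the window back on
 * PCICFG_VENDOR_ID_OFFSET, presumably so that stray config cycles
 * stay harmless.
 */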
138
139#ifdef BNX2X_IND_RD
140static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
141{
142 u32 val;
143
144 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
145 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
147 PCICFG_VENDOR_ID_OFFSET);
148
149 return val;
150}
151#endif
152
153static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158};
159
160/* copy command into DMAE command memory and set DMAE command go */
161static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 int idx)
163{
164 u32 cmd_offset;
165 int i;
166
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
171/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
173 }
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
175}
176
177static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
178 u32 dst_addr, u32 len32)
179{
180 struct dmae_command *dmae = &bp->dmae;
181 int port = bp->port;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
183 int timeout = 200;
184
185 memset(dmae, 0, sizeof(struct dmae_command));
186
187 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
188 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
190#ifdef __BIG_ENDIAN
191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
192#else
193 DMAE_CMD_ENDIANITY_DW_SWAP |
194#endif
195 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
196 dmae->src_addr_lo = U64_LO(dma_addr);
197 dmae->src_addr_hi = U64_HI(dma_addr);
198 dmae->dst_addr_lo = dst_addr >> 2;
199 dmae->dst_addr_hi = 0;
200 dmae->len = len32;
201 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
202 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
203 dmae->comp_val = BNX2X_WB_COMP_VAL;
204
205/*
206 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
207 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
208 "dst_addr [%x:%08x (%08x)]\n"
209 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
210 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
211 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
212 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
213*/
214/*
215 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
216 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
217 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
218*/
219
220 *wb_comp = 0;
221
222 bnx2x_post_dmae(bp, dmae, port * 8);
223
224 udelay(5);
225 /* adjust timeout for emulation/FPGA */
226 if (CHIP_REV_IS_SLOW(bp))
227 timeout *= 100;
228 while (*wb_comp != BNX2X_WB_COMP_VAL) {
229/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
230 udelay(5);
231 if (!timeout) {
232 BNX2X_ERR("dmae timeout!\n");
233 break;
234 }
235 timeout--;
236 }
237}
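/* Illustration (not driver code): callers hand bnx2x_write_dmae() a
 * DMA-able source and a GRC destination; 'grc_dst', 'val_lo' and
 * 'val_hi' below are hypothetical.
 *
 *	u32 *data = bnx2x_sp(bp, wb_data[0]);
 *
 *	data[0] = val_lo;
 *	data[1] = val_hi;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_dst, 2);
 *
 * The call busy-waits on wb_comp until the engine signals completion
 * or the timeout expires; the REG_WR_DMAE() users later in this file
 * follow the same pattern.
 */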
238
239#ifdef BNX2X_DMAE_RD
240static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
241{
242 struct dmae_command *dmae = &bp->dmae;
243 int port = bp->port;
244 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
245 int timeout = 200;
246
247 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
248 memset(dmae, 0, sizeof(struct dmae_command));
249
250 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
251 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
252 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
253#ifdef __BIG_ENDIAN
254 DMAE_CMD_ENDIANITY_B_DW_SWAP |
255#else
256 DMAE_CMD_ENDIANITY_DW_SWAP |
257#endif
258 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
259 dmae->src_addr_lo = src_addr >> 2;
260 dmae->src_addr_hi = 0;
261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
263 dmae->len = len32;
264 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
265 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
266 dmae->comp_val = BNX2X_WB_COMP_VAL;
267
268/*
269 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
270 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
271 "dst_addr [%x:%08x (%08x)]\n"
272 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
273 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
274 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
275 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
276*/
277
278 *wb_comp = 0;
279
280 bnx2x_post_dmae(bp, dmae, port * 8);
281
282 udelay(5);
283 while (*wb_comp != BNX2X_WB_COMP_VAL) {
284 udelay(5);
285 if (!timeout) {
286 BNX2X_ERR("dmae timeout!\n");
287 break;
288 }
289 timeout--;
290 }
291/*
292 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
293 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
294 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
295*/
296}
297#endif
298
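/* Scan the assert lists of all four storm processors (X, T, C, U);
 * each valid entry is four dwords, the scan stops at the first
 * COMMON_ASM_INVALID_ASSERT_OPCODE, and the return value is the
 * number of asserts found (0 if none fired).
 */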
299static int bnx2x_mc_assert(struct bnx2x *bp)
300{
301 int i, j;
302 int rc = 0;
303 char last_idx;
304 const char storm[] = {"XTCU"};
305 const u32 intmem_base[] = {
306 BAR_XSTRORM_INTMEM,
307 BAR_TSTRORM_INTMEM,
308 BAR_CSTRORM_INTMEM,
309 BAR_USTRORM_INTMEM
310 };
311
312 /* Go through all instances of all SEMIs */
313 for (i = 0; i < 4; i++) {
314 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
315 intmem_base[i]);
316 BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
317 storm[i], last_idx);
318
319 /* print the asserts */
320 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
321 u32 row0, row1, row2, row3;
322
323 row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
324 intmem_base[i]);
325 row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
326 intmem_base[i]);
327 row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
328 intmem_base[i]);
329 row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
330 intmem_base[i]);
331
332 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
333 BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
334 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
335 storm[i], j, row3, row2, row1, row0);
336 rc++;
337 } else {
338 break;
339 }
340 }
341 }
342 return rc;
343}
344
345static void bnx2x_fw_dump(struct bnx2x *bp)
346{
347 u32 mark, offset;
348 u32 data[9];
349 int word;
350
351 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
353
354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355 for (word = 0; word < 8; word++)
356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
357 offset + 4*word));
358 data[8] = 0x0;
359 printk(KERN_ERR PFX "%s", (char *)data);
360 }
361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362 for (word = 0; word < 8; word++)
363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
364 offset + 4*word));
365 data[8] = 0x0;
366 printk(KERN_ERR PFX "%s", (char *)data);
367 }
 368	printk(KERN_ERR PFX "end of fw dump\n");
369}
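/* The scratchpad is dumped as a ring: 'mark' points at the newest data,
 * so the tail (mark..0xF900) is printed first and the wrapped head
 * (0xF108..mark) second; htonl() restores byte order so that each
 * 32-byte chunk prints as readable text.
 */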
370
371static void bnx2x_panic_dump(struct bnx2x *bp)
372{
373 int i;
374 u16 j, start, end;
375
376 BNX2X_ERR("begin crash dump -----------------\n");
377
378 for_each_queue(bp, i) {
379 struct bnx2x_fastpath *fp = &bp->fp[i];
380 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
381
382 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
383 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
384 " *rx_cons_sb(%x) rx_comp_prod(%x)"
385 " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
386 " bd data(%x,%x)\n",
387 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
388 fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
389 fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
390 fp->fp_u_idx, hw_prods->packets_prod,
391 hw_prods->bds_prod);
392
393 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
394 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
395 for (j = start; j < end; j++) {
396 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
397
398 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
399 sw_bd->skb, sw_bd->first_bd);
400 }
401
402 start = TX_BD(fp->tx_bd_cons - 10);
403 end = TX_BD(fp->tx_bd_cons + 254);
404 for (j = start; j < end; j++) {
405 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
406
407 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
408 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
409 }
410
411 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
412 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
413 for (j = start; j < end; j++) {
414 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
415 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
416
417 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
418 j, rx_bd[0], rx_bd[1], sw_bd->skb);
419 }
420
421 start = RCQ_BD(fp->rx_comp_cons - 10);
422 end = RCQ_BD(fp->rx_comp_cons + 503);
423 for (j = start; j < end; j++) {
424 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
425
426 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
427 j, cqe[0], cqe[1], cqe[2], cqe[3]);
428 }
429 }
430
431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)"
432 " def_x_idx(%u) def_att_idx(%u) attn_state(%u)"
433 " spq_prod_idx(%u)\n",
434 bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
436
437
438 bnx2x_mc_assert(bp);
439 BNX2X_ERR("end crash dump -----------------\n");
440
441 bp->stats_state = STATS_STATE_DISABLE;
442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
443}
444
445static void bnx2x_enable_int(struct bnx2x *bp)
446{
447 int port = bp->port;
448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449 u32 val = REG_RD(bp, addr);
450 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
451
452 if (msix) {
453 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
456 } else {
457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458 HC_CONFIG_0_REG_INT_LINE_EN_0 |
459 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
460 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
461 }
462
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n",
464 val, port, addr, msix);
465
466 REG_WR(bp, addr, val);
467}
468
469static void bnx2x_disable_int(struct bnx2x *bp)
470{
471 int port = bp->port;
472 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
473 u32 val = REG_RD(bp, addr);
474
475 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
476 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
477 HC_CONFIG_0_REG_INT_LINE_EN_0 |
478 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
479
480 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
481 val, port, addr);
482
483 REG_WR(bp, addr, val);
484 if (REG_RD(bp, addr) != val)
485 BNX2X_ERR("BUG! proper val not read from IGU!\n");
486}
487
488static void bnx2x_disable_int_sync(struct bnx2x *bp)
489{
490
491 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
492 int i;
493
494 atomic_inc(&bp->intr_sem);
495	/* prevent the HW from sending interrupts */
496 bnx2x_disable_int(bp);
497
498 /* make sure all ISRs are done */
499 if (msix) {
500 for_each_queue(bp, i)
501 synchronize_irq(bp->msix_table[i].vector);
502
503 /* one more for the Slow Path IRQ */
504 synchronize_irq(bp->msix_table[i].vector);
505 } else
506 synchronize_irq(bp->pdev->irq);
507
508 /* make sure sp_task is not running */
509 cancel_work_sync(&bp->sp_task);
510
511}
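/* Note: the intr_sem incremented above is what makes bnx2x_interrupt()
 * return early on a shared IRQ line, so once the synchronize/cancel
 * calls finish no new slowpath work can be queued behind our back.
 */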
512
513/* fast path code */
514
515/*
516 * general service functions
517 */
518
519static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
520 u8 storm, u16 index, u8 op, u8 update)
521{
522 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
523 struct igu_ack_register igu_ack;
524
525 igu_ack.status_block_index = index;
526 igu_ack.sb_id_and_flags =
527 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
528 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
529 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
530 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
531
532/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
533 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
534 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
535}
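/* Usage note: the MSI-X fast path ISR below acks with
 *
 *	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 *
 * i.e. it masks the vector without updating the index (update == 0);
 * the matching IGU_INT_ENABLE ack with the fresh index is expected to
 * come from the NAPI poll path once the queue has been serviced (not
 * shown in this excerpt).
 */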
536
537static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
538{
539 struct host_status_block *fpsb = fp->status_blk;
540 u16 rc = 0;
541
542 barrier(); /* status block is written to by the chip */
543 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
544 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
545 rc |= 1;
546 }
547 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
548 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
549 rc |= 2;
550 }
551 return rc;
552}
553
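/* The completion ring appears to be a page chain where the last entry of
 * each page (index MAX_RCQ_DESC_CNT) is a link element rather than a CQE,
 * hence the skip below; bnx2x_rx_int() applies the same adjustment to
 * hw_comp_cons.
 */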
554static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
555{
556 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
557
558 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
559 rx_cons_sb++;
560
561 if ((rx_cons_sb != fp->rx_comp_cons) ||
562 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
563 return 1;
564
565 return 0;
566}
567
568static u16 bnx2x_ack_int(struct bnx2x *bp)
569{
570 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
571 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
572
573/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
574 result, BAR_IGU_INTMEM + igu_addr); */
575
576#ifdef IGU_DEBUG
577#warning IGU_DEBUG active
578 if (result == 0) {
579 BNX2X_ERR("read %x from IGU\n", result);
580 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
581 }
582#endif
583 return result;
584}
585
586
587/*
588 * fast path service functions
589 */
590
591/* free skb in the packet ring at pos idx
592 * return idx of last bd freed
593 */
594static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
595 u16 idx)
596{
597 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
598 struct eth_tx_bd *tx_bd;
599 struct sk_buff *skb = tx_buf->skb;
600 u16 bd_idx = tx_buf->first_bd;
601 int nbd;
602
603 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
604 idx, tx_buf, skb);
605
606 /* unmap first bd */
607 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
608 tx_bd = &fp->tx_desc_ring[bd_idx];
609 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
610 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
611
612 nbd = le16_to_cpu(tx_bd->nbd) - 1;
613#ifdef BNX2X_STOP_ON_ERROR
614 if (nbd > (MAX_SKB_FRAGS + 2)) {
615 BNX2X_ERR("bad nbd!\n");
616 bnx2x_panic();
617 }
618#endif
619
620 /* Skip a parse bd and the TSO split header bd
621 since they have no mapping */
622 if (nbd)
623 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
624
625 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
626 ETH_TX_BD_FLAGS_TCP_CSUM |
627 ETH_TX_BD_FLAGS_SW_LSO)) {
628 if (--nbd)
629 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
630 tx_bd = &fp->tx_desc_ring[bd_idx];
631 /* is this a TSO split header bd? */
632 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
633 if (--nbd)
634 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
635 }
636 }
637
638 /* now free frags */
639 while (nbd > 0) {
640
641 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
642 tx_bd = &fp->tx_desc_ring[bd_idx];
643 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
644 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
645 if (--nbd)
646 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
647 }
648
649 /* release skb */
650 BUG_TRAP(skb);
651 dev_kfree_skb(skb);
652 tx_buf->first_bd = 0;
653 tx_buf->skb = NULL;
654
655 return bd_idx;
656}
657
658static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
659{
660 u16 used;
661 u32 prod;
662 u32 cons;
663
664 /* Tell compiler that prod and cons can change */
665 barrier();
666 prod = fp->tx_bd_prod;
667 cons = fp->tx_bd_cons;
668
669 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
670 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
671
672 if (prod >= cons) {
673 /* used = prod - cons - prod/size + cons/size */
674 used -= NUM_TX_BD - NUM_TX_RINGS;
675 }
676
677 BUG_TRAP(used <= fp->bp->tx_ring_size);
678 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
679
680 return (fp->bp->tx_ring_size - used);
681}
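/* Sanity check of the arithmetic: NUM_TX_BD - NUM_TX_RINGS suggests one
 * BD per page is reserved for the chain's next-page link, leaving that
 * many usable BDs.  With prod == cons the first expression evaluates to
 * exactly NUM_TX_BD - NUM_TX_RINGS, the prod >= cons correction cancels
 * it, and used == 0, i.e. an empty ring as expected.
 */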
682
683static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
684{
685 struct bnx2x *bp = fp->bp;
686 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
687 int done = 0;
688
689#ifdef BNX2X_STOP_ON_ERROR
690 if (unlikely(bp->panic))
691 return;
692#endif
693
694 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
695 sw_cons = fp->tx_pkt_cons;
696
697 while (sw_cons != hw_cons) {
698 u16 pkt_cons;
699
700 pkt_cons = TX_BD(sw_cons);
701
702 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
703
704 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
705 hw_cons, sw_cons, pkt_cons);
706
707/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
708 rmb();
709 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
710 }
711*/
712 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
713 sw_cons++;
714 done++;
715
716 if (done == work)
717 break;
718 }
719
720 fp->tx_pkt_cons = sw_cons;
721 fp->tx_bd_cons = bd_cons;
722
723 /* Need to make the tx_cons update visible to start_xmit()
724 * before checking for netif_queue_stopped(). Without the
725 * memory barrier, there is a small possibility that start_xmit()
726 * will miss it and cause the queue to be stopped forever.
727 */
728 smp_mb();
729
730 /* TBD need a thresh? */
731 if (unlikely(netif_queue_stopped(bp->dev))) {
732
733 netif_tx_lock(bp->dev);
734
735 if (netif_queue_stopped(bp->dev) &&
736 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
737 netif_wake_queue(bp->dev);
738
739 netif_tx_unlock(bp->dev);
740
741 }
742}
743
744static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
745 union eth_rx_cqe *rr_cqe)
746{
747 struct bnx2x *bp = fp->bp;
748 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
749 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
750
751 DP(NETIF_MSG_RX_STATUS,
752 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
753 fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
754
755 bp->spq_left++;
756
757 if (fp->index) {
758 switch (command | fp->state) {
759 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
760 BNX2X_FP_STATE_OPENING):
761 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
762 cid);
763 fp->state = BNX2X_FP_STATE_OPEN;
764 break;
765
766 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
767 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
768 cid);
769 fp->state = BNX2X_FP_STATE_HALTED;
770 break;
771
772 default:
773 BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
774 command, fp->state);
775 }
776 mb(); /* force bnx2x_wait_ramrod to see the change */
777 return;
778 }
779
780 switch (command | bp->state) {
781 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
782 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
783 bp->state = BNX2X_STATE_OPEN;
784 break;
785
786 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
787 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
788 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
789 fp->state = BNX2X_FP_STATE_HALTED;
790 break;
791
792 case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
793 DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
794 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
795 break;
796
797 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
798 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
799 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
800 break;
801
802 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
803 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
804 break;
805
806 default:
807 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
808 command, bp->state);
809 }
810
811 mb(); /* force bnx2x_wait_ramrod to see the change */
812}
813
814static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
815 struct bnx2x_fastpath *fp, u16 index)
816{
817 struct sk_buff *skb;
818 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
819 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
820 dma_addr_t mapping;
821
822 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
823 if (unlikely(skb == NULL))
824 return -ENOMEM;
825
826 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
827 PCI_DMA_FROMDEVICE);
828 if (unlikely(dma_mapping_error(mapping))) {
829
830 dev_kfree_skb(skb);
831 return -ENOMEM;
832 }
833
834 rx_buf->skb = skb;
835 pci_unmap_addr_set(rx_buf, mapping, mapping);
836
837 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
838 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
839
840 return 0;
841}
842
843/* note that we are not allocating a new skb,
844 * we are just moving one from cons to prod
845 * we are not creating a new mapping,
846 * so there is no need to check for dma_mapping_error().
847 */
848static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
849 struct sk_buff *skb, u16 cons, u16 prod)
850{
851 struct bnx2x *bp = fp->bp;
852 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
853 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
854 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
855 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
856
857 pci_dma_sync_single_for_device(bp->pdev,
858 pci_unmap_addr(cons_rx_buf, mapping),
859 bp->rx_offset + RX_COPY_THRESH,
860 PCI_DMA_FROMDEVICE);
861
862 prod_rx_buf->skb = cons_rx_buf->skb;
863 pci_unmap_addr_set(prod_rx_buf, mapping,
864 pci_unmap_addr(cons_rx_buf, mapping));
865 *prod_bd = *cons_bd;
866}
867
868static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
869{
870 struct bnx2x *bp = fp->bp;
871 u16 bd_cons, bd_prod, comp_ring_cons;
872 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
873 int rx_pkt = 0;
874
875#ifdef BNX2X_STOP_ON_ERROR
876 if (unlikely(bp->panic))
877 return 0;
878#endif
879
880 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
881 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
882 hw_comp_cons++;
883
884 bd_cons = fp->rx_bd_cons;
885 bd_prod = fp->rx_bd_prod;
886 sw_comp_cons = fp->rx_comp_cons;
887 sw_comp_prod = fp->rx_comp_prod;
888
889 /* Memory barrier necessary as speculative reads of the rx
890 * buffer can be ahead of the index in the status block
891 */
892 rmb();
893
894 DP(NETIF_MSG_RX_STATUS,
895 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
896 fp->index, hw_comp_cons, sw_comp_cons);
897
898 while (sw_comp_cons != hw_comp_cons) {
899 unsigned int len, pad;
900 struct sw_rx_bd *rx_buf;
901 struct sk_buff *skb;
902 union eth_rx_cqe *cqe;
903
904 comp_ring_cons = RCQ_BD(sw_comp_cons);
905 bd_prod = RX_BD(bd_prod);
906 bd_cons = RX_BD(bd_cons);
907
908 cqe = &fp->rx_comp_ring[comp_ring_cons];
909
910 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
911 " comp_ring (%u) bd_ring (%u,%u)\n",
912 hw_comp_cons, sw_comp_cons,
913 comp_ring_cons, bd_prod, bd_cons);
914 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
915 " queue %x vlan %x len %x\n",
916 cqe->fast_path_cqe.type,
917 cqe->fast_path_cqe.error_type_flags,
918 cqe->fast_path_cqe.status_flags,
919 cqe->fast_path_cqe.rss_hash_result,
920 cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
921
922 /* is this a slowpath msg? */
923 if (unlikely(cqe->fast_path_cqe.type)) {
924 bnx2x_sp_event(fp, cqe);
925 goto next_cqe;
926
927 /* this is an rx packet */
928 } else {
929 rx_buf = &fp->rx_buf_ring[bd_cons];
930 skb = rx_buf->skb;
931
932 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
933 pad = cqe->fast_path_cqe.placement_offset;
934
935 pci_dma_sync_single_for_device(bp->pdev,
936 pci_unmap_addr(rx_buf, mapping),
937 pad + RX_COPY_THRESH,
938 PCI_DMA_FROMDEVICE);
939 prefetch(skb);
940 prefetch(((char *)(skb)) + 128);
941
942 /* is this an error packet? */
943 if (unlikely(cqe->fast_path_cqe.error_type_flags &
944 ETH_RX_ERROR_FALGS)) {
945 /* do we sometimes forward error packets anyway? */
946 DP(NETIF_MSG_RX_ERR,
947 "ERROR flags(%u) Rx packet(%u)\n",
948 cqe->fast_path_cqe.error_type_flags,
949 sw_comp_cons);
950 /* TBD make sure MC counts this as a drop */
951 goto reuse_rx;
952 }
953
954 /* Since we don't have a jumbo ring
955 * copy small packets if mtu > 1500
956 */
957 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
958 (len <= RX_COPY_THRESH)) {
959 struct sk_buff *new_skb;
960
961 new_skb = netdev_alloc_skb(bp->dev,
962 len + pad);
963 if (new_skb == NULL) {
964 DP(NETIF_MSG_RX_ERR,
965 "ERROR packet dropped "
966 "because of alloc failure\n");
967 /* TBD count this as a drop? */
968 goto reuse_rx;
969 }
970
971 /* aligned copy */
972 skb_copy_from_linear_data_offset(skb, pad,
973 new_skb->data + pad, len);
974 skb_reserve(new_skb, pad);
975 skb_put(new_skb, len);
976
977 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
978
979 skb = new_skb;
980
981 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
982 pci_unmap_single(bp->pdev,
983 pci_unmap_addr(rx_buf, mapping),
984 bp->rx_buf_use_size,
985 PCI_DMA_FROMDEVICE);
986 skb_reserve(skb, pad);
987 skb_put(skb, len);
988
989 } else {
990 DP(NETIF_MSG_RX_ERR,
991 "ERROR packet dropped because "
992 "of alloc failure\n");
993reuse_rx:
994 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
995 goto next_rx;
996 }
997
998 skb->protocol = eth_type_trans(skb, bp->dev);
999
1000 skb->ip_summed = CHECKSUM_NONE;
1001 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1002 skb->ip_summed = CHECKSUM_UNNECESSARY;
1003
1004 /* TBD do we pass bad csum packets in promisc */
1005 }
1006
1007#ifdef BCM_VLAN
1008 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1009 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1010 && (bp->vlgrp != NULL))
1011 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1012 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1013 else
1014#endif
1015 netif_receive_skb(skb);
1016
1017 bp->dev->last_rx = jiffies;
1018
1019next_rx:
1020 rx_buf->skb = NULL;
1021
1022 bd_cons = NEXT_RX_IDX(bd_cons);
1023 bd_prod = NEXT_RX_IDX(bd_prod);
1024next_cqe:
1025 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1026 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1027 rx_pkt++;
1028
1029 if ((rx_pkt == budget))
1030 break;
1031 } /* while */
1032
1033 fp->rx_bd_cons = bd_cons;
1034 fp->rx_bd_prod = bd_prod;
1035 fp->rx_comp_cons = sw_comp_cons;
1036 fp->rx_comp_prod = sw_comp_prod;
1037
1038 REG_WR(bp, BAR_TSTRORM_INTMEM +
1039 TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1040
1041 mmiowb(); /* keep prod updates ordered */
1042
1043 fp->rx_pkt += rx_pkt;
1044 fp->rx_calls++;
1045
1046 return rx_pkt;
1047}
1048
1049static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1050{
1051 struct bnx2x_fastpath *fp = fp_cookie;
1052 struct bnx2x *bp = fp->bp;
1053 struct net_device *dev = bp->dev;
1054 int index = fp->index;
1055
1056 DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1057 bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1058
1059#ifdef BNX2X_STOP_ON_ERROR
1060 if (unlikely(bp->panic))
1061 return IRQ_HANDLED;
1062#endif
1063
1064 prefetch(fp->rx_cons_sb);
1065 prefetch(fp->tx_cons_sb);
1066 prefetch(&fp->status_blk->c_status_block.status_block_index);
1067 prefetch(&fp->status_blk->u_status_block.status_block_index);
1068
1069 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1070 return IRQ_HANDLED;
1071}
1072
1073static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1074{
1075 struct net_device *dev = dev_instance;
1076 struct bnx2x *bp = netdev_priv(dev);
1077 u16 status = bnx2x_ack_int(bp);
1078
1079 if (unlikely(status == 0)) {
1080 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1081 return IRQ_NONE;
1082 }
1083
1084 DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1085
1086#ifdef BNX2X_STOP_ON_ERROR
1087 if (unlikely(bp->panic))
1088 return IRQ_HANDLED;
1089#endif
1090
1091 /* Return here if interrupt is shared and is disabled */
1092 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1093 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1094 return IRQ_HANDLED;
1095 }
1096
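	/* status bit 1 (0x2): fast path queue 0, handled via NAPI;
	 * status bit 0 (0x1): slowpath work, deferred to sp_task
	 */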
1097 if (status & 0x2) {
1098 struct bnx2x_fastpath *fp = &bp->fp[0];
1099
1100 prefetch(fp->rx_cons_sb);
1101 prefetch(fp->tx_cons_sb);
1102 prefetch(&fp->status_blk->c_status_block.status_block_index);
1103 prefetch(&fp->status_blk->u_status_block.status_block_index);
1104
1105 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1106
1107 status &= ~0x2;
1108 if (!status)
1109 return IRQ_HANDLED;
1110 }
1111
1112 if (unlikely(status & 0x1)) {
1113
1114 schedule_work(&bp->sp_task);
1115
1116 status &= ~0x1;
1117 if (!status)
1118 return IRQ_HANDLED;
1119 }
1120
1121 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1122 status);
1123
1124 return IRQ_HANDLED;
1125}
1126
1127/* end of fast path */
1128
1129/* PHY/MAC */
1130
1131/*
1132 * General service functions
1133 */
1134
1135static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1136{
1137 int port = bp->port;
1138
1139 NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1140 ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1141 SHARED_HW_CFG_LED_MODE_SHIFT));
1142 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1143
1144 /* Set blinking rate to ~15.9Hz */
1145 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1146 LED_BLINK_RATE_VAL);
1147 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1148
1149 /* On Ax chip versions for speeds less than 10G
1150 LED scheme is different */
1151 if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1152 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1153 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1154 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
1155 }
1156}
1157
1158static void bnx2x_leds_unset(struct bnx2x *bp)
1159{
1160 int port = bp->port;
1161
1162 NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
1163 NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
1164}
1165
1166static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1167{
1168 u32 val = REG_RD(bp, reg);
1169
1170 val |= bits;
1171 REG_WR(bp, reg, val);
1172 return val;
1173}
1174
1175static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1176{
1177 u32 val = REG_RD(bp, reg);
1178
1179 val &= ~bits;
1180 REG_WR(bp, reg, val);
1181 return val;
1182}
1183
1184static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1185{
1186 int rc;
1187 u32 tmp, i;
1188 int port = bp->port;
1189 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1190
1191/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1192 bp->phy_addr, reg, val); */
1193
1194 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1195
1196 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1197 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1198 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1199 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1200 udelay(40);
1201 }
1202
1203 tmp = ((bp->phy_addr << 21) | (reg << 16) |
1204 (val & EMAC_MDIO_COMM_DATA) |
1205 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1206 EMAC_MDIO_COMM_START_BUSY);
1207 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1208
1209 for (i = 0; i < 50; i++) {
1210 udelay(10);
1211
1212 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1213 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1214 udelay(5);
1215 break;
1216 }
1217 }
1218
1219 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1220 BNX2X_ERR("write phy register failed\n");
1221
1222 rc = -EBUSY;
1223 } else {
1224 rc = 0;
1225 }
1226
1227 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1228
1229 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1230 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1231 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1232 }
1233
1234 return rc;
1235}
1236
1237static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1238{
1239 int port = bp->port;
1240 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1241 u32 val, i;
1242 int rc;
1243
1244 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1245
1246 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1247 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1248 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1249 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1250 udelay(40);
1251 }
1252
1253 val = ((bp->phy_addr << 21) | (reg << 16) |
1254 EMAC_MDIO_COMM_COMMAND_READ_22 |
1255 EMAC_MDIO_COMM_START_BUSY);
1256 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1257
1258 for (i = 0; i < 50; i++) {
1259 udelay(10);
1260
1261 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1262 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1263 val &= EMAC_MDIO_COMM_DATA;
1264 break;
1265 }
1266 }
1267
1268 if (val & EMAC_MDIO_COMM_START_BUSY) {
1269 BNX2X_ERR("read phy register failed\n");
1270
1271 *ret_val = 0x0;
1272 rc = -EBUSY;
1273 } else {
1274 *ret_val = val;
1275 rc = 0;
1276 }
1277
1278 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1279
1280 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1281 val |= EMAC_MDIO_MODE_AUTO_POLL;
1282 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1283 }
1284
1285/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
1286 bp->phy_addr, reg, *ret_val); */
1287
1288 return rc;
1289}
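/* Illustration (not driver code): a clause 22 read of the basic status
 * register at the current bp->phy_addr, using MII_BMSR/BMSR_LSTATUS from
 * <linux/mii.h> (included above):
 *
 *	u32 bmsr;
 *
 *	if (!bnx2x_mdio22_read(bp, MII_BMSR, &bmsr) &&
 *	    (bmsr & BMSR_LSTATUS))
 *		DP(NETIF_MSG_LINK, "PHY reports link up\n");
 */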
1290
1291static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1292{
1293 int rc = 0;
1294 u32 tmp, i;
1295 int port = bp->port;
1296 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1297
1298 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1299
1300 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1301 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1302 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1303 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1304 udelay(40);
1305 }
1306
1307 /* set clause 45 mode */
1308 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1309 tmp |= EMAC_MDIO_MODE_CLAUSE_45;
1310 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1311
1312 /* address */
1313 tmp = ((bp->phy_addr << 21) | (reg << 16) | addr |
1314 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1315 EMAC_MDIO_COMM_START_BUSY);
1316 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1317
1318 for (i = 0; i < 50; i++) {
1319 udelay(10);
1320
1321 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1322 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1323 udelay(5);
1324 break;
1325 }
1326 }
1327
1328 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1329 BNX2X_ERR("write phy register failed\n");
1330
1331 rc = -EBUSY;
1332 } else {
1333 /* data */
1334 tmp = ((bp->phy_addr << 21) | (reg << 16) | val |
1335 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1336 EMAC_MDIO_COMM_START_BUSY);
1337 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1338
1339 for (i = 0; i < 50; i++) {
1340 udelay(10);
1341
1342 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1343 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1344 udelay(5);
1345 break;
1346 }
1347 }
1348
1349 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1350 BNX2X_ERR("write phy register failed\n");
1351
1352 rc = -EBUSY;
1353 }
1354 }
1355
1356 /* unset clause 45 mode */
1357 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1358 tmp &= ~EMAC_MDIO_MODE_CLAUSE_45;
1359 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1360
1361 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1362
1363 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1364 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1365 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1366 }
1367
1368 return rc;
1369}
1370
1371static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
1372 u32 *ret_val)
1373{
1374 int port = bp->port;
1375 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1376 u32 val, i;
1377 int rc = 0;
1378
1379 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1380
1381 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1382 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1383 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1384 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1385 udelay(40);
1386 }
1387
1388 /* set clause 45 mode */
1389 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1390 val |= EMAC_MDIO_MODE_CLAUSE_45;
1391 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1392
1393 /* address */
1394 val = ((bp->phy_addr << 21) | (reg << 16) | addr |
1395 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1396 EMAC_MDIO_COMM_START_BUSY);
1397 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1398
1399 for (i = 0; i < 50; i++) {
1400 udelay(10);
1401
1402 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1403 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1404 udelay(5);
1405 break;
1406 }
1407 }
1408
1409 if (val & EMAC_MDIO_COMM_START_BUSY) {
1410 BNX2X_ERR("read phy register failed\n");
1411
1412 *ret_val = 0;
1413 rc = -EBUSY;
1414 } else {
1415 /* data */
1416 val = ((bp->phy_addr << 21) | (reg << 16) |
1417 EMAC_MDIO_COMM_COMMAND_READ_45 |
1418 EMAC_MDIO_COMM_START_BUSY);
1419 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1420
1421 for (i = 0; i < 50; i++) {
1422 udelay(10);
1423
1424 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1425 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1426 val &= EMAC_MDIO_COMM_DATA;
1427 break;
1428 }
1429 }
1430
1431 if (val & EMAC_MDIO_COMM_START_BUSY) {
1432 BNX2X_ERR("read phy register failed\n");
1433
1434 val = 0;
1435 rc = -EBUSY;
1436 }
1437
1438 *ret_val = val;
1439 }
1440
1441 /* unset clause 45 mode */
1442 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1443 val &= ~EMAC_MDIO_MODE_CLAUSE_45;
1444 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1445
1446 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1447
1448 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1449 val |= EMAC_MDIO_MODE_AUTO_POLL;
1450 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1451 }
1452
1453 return rc;
1454}
1455
1456static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1457{
1458 int i;
1459 u32 rd_val;
1460
1461 might_sleep();
1462 for (i = 0; i < 10; i++) {
1463 bnx2x_mdio45_write(bp, reg, addr, val);
1464 msleep(5);
1465 bnx2x_mdio45_read(bp, reg, addr, &rd_val);
1466 /* if the read value is not the same as the value we wrote,
1467 we should write it again */
1468 if (rd_val == val)
1469 return 0;
1470 }
1471 BNX2X_ERR("MDIO write in CL45 failed\n");
1472 return -EBUSY;
1473}
1474
1475/*
1476 * link management
1477 */
1478
1479static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1480{
1481 u32 ld_pause; /* local driver */
1482 u32 lp_pause; /* link partner */
1483 u32 pause_result;
1484
1485 bp->flow_ctrl = 0;
1486
1487	/* resolve from gp_status in case of AN complete and not sgmii */
1488 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1489 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1490 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
1491 (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1492
1493 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
1494 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1495 &ld_pause);
1496 bnx2x_mdio22_read(bp,
1497 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1498 &lp_pause);
1499 pause_result = (ld_pause &
1500 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1501 pause_result |= (lp_pause &
1502 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1503 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
1504
1505 switch (pause_result) { /* ASYM P ASYM P */
1506 case 0xb: /* 1 0 1 1 */
1507 bp->flow_ctrl = FLOW_CTRL_TX;
1508 break;
1509
1510 case 0xe: /* 1 1 1 0 */
1511 bp->flow_ctrl = FLOW_CTRL_RX;
1512 break;
1513
1514 case 0x5: /* 0 1 0 1 */
1515 case 0x7: /* 0 1 1 1 */
1516 case 0xd: /* 1 1 0 1 */
1517 case 0xf: /* 1 1 1 1 */
1518 bp->flow_ctrl = FLOW_CTRL_BOTH;
1519 break;
1520
1521 default:
1522 break;
1523 }
1524
1525 } else { /* forced mode */
1526 switch (bp->req_flow_ctrl) {
1527 case FLOW_CTRL_AUTO:
1528 if (bp->dev->mtu <= 4500)
1529 bp->flow_ctrl = FLOW_CTRL_BOTH;
1530 else
1531 bp->flow_ctrl = FLOW_CTRL_TX;
1532 break;
1533
1534 case FLOW_CTRL_TX:
1535 case FLOW_CTRL_RX:
1536 case FLOW_CTRL_BOTH:
1537 bp->flow_ctrl = bp->req_flow_ctrl;
1538 break;
1539
1540 case FLOW_CTRL_NONE:
1541 default:
1542 break;
1543 }
1544 }
1545 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
1546}
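/* Example decode: pause_result 0xb (binary 1011) is local ASYM=1/PAUSE=0
 * with partner ASYM=1/PAUSE=1, i.e. we advertised asymmetric pause only
 * while the partner can both send and honour pause frames, so the
 * resolution is transmit-only flow control (FLOW_CTRL_TX).
 */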
1547
1548static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1549{
1550 bp->link_status = 0;
1551
1552 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1553 DP(NETIF_MSG_LINK, "link up\n");
1554
1555 bp->link_up = 1;
1556 bp->link_status |= LINK_STATUS_LINK_UP;
1557
1558 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1559 bp->duplex = DUPLEX_FULL;
1560 else
1561 bp->duplex = DUPLEX_HALF;
1562
1563 bnx2x_flow_ctrl_resolve(bp, gp_status);
1564
1565 switch (gp_status & GP_STATUS_SPEED_MASK) {
1566 case GP_STATUS_10M:
1567 bp->line_speed = SPEED_10;
1568 if (bp->duplex == DUPLEX_FULL)
1569 bp->link_status |= LINK_10TFD;
1570 else
1571 bp->link_status |= LINK_10THD;
1572 break;
1573
1574 case GP_STATUS_100M:
1575 bp->line_speed = SPEED_100;
1576 if (bp->duplex == DUPLEX_FULL)
1577 bp->link_status |= LINK_100TXFD;
1578 else
1579 bp->link_status |= LINK_100TXHD;
1580 break;
1581
1582 case GP_STATUS_1G:
1583 case GP_STATUS_1G_KX:
1584 bp->line_speed = SPEED_1000;
1585 if (bp->duplex == DUPLEX_FULL)
1586 bp->link_status |= LINK_1000TFD;
1587 else
1588 bp->link_status |= LINK_1000THD;
1589 break;
1590
1591 case GP_STATUS_2_5G:
1592 bp->line_speed = SPEED_2500;
1593 if (bp->duplex == DUPLEX_FULL)
1594 bp->link_status |= LINK_2500TFD;
1595 else
1596 bp->link_status |= LINK_2500THD;
1597 break;
1598
1599 case GP_STATUS_5G:
1600 case GP_STATUS_6G:
1601 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1602 gp_status);
1603 break;
1604
1605 case GP_STATUS_10G_KX4:
1606 case GP_STATUS_10G_HIG:
1607 case GP_STATUS_10G_CX4:
1608 bp->line_speed = SPEED_10000;
1609 bp->link_status |= LINK_10GTFD;
1610 break;
1611
1612 case GP_STATUS_12G_HIG:
1613 bp->line_speed = SPEED_12000;
1614 bp->link_status |= LINK_12GTFD;
1615 break;
1616
1617 case GP_STATUS_12_5G:
1618 bp->line_speed = SPEED_12500;
1619 bp->link_status |= LINK_12_5GTFD;
1620 break;
1621
1622 case GP_STATUS_13G:
1623 bp->line_speed = SPEED_13000;
1624 bp->link_status |= LINK_13GTFD;
1625 break;
1626
1627 case GP_STATUS_15G:
1628 bp->line_speed = SPEED_15000;
1629 bp->link_status |= LINK_15GTFD;
1630 break;
1631
1632 case GP_STATUS_16G:
1633 bp->line_speed = SPEED_16000;
1634 bp->link_status |= LINK_16GTFD;
1635 break;
1636
1637 default:
1638 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1639 gp_status);
1640 break;
1641 }
1642
1643 bp->link_status |= LINK_STATUS_SERDES_LINK;
1644
1645 if (bp->req_autoneg & AUTONEG_SPEED) {
1646 bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1647
1648 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1649 bp->link_status |=
1650 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1651
1652 if (bp->autoneg & AUTONEG_PARALLEL)
1653 bp->link_status |=
1654 LINK_STATUS_PARALLEL_DETECTION_USED;
1655 }
1656
1657 if (bp->flow_ctrl & FLOW_CTRL_TX)
1658 bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1659
1660 if (bp->flow_ctrl & FLOW_CTRL_RX)
1661 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1662
1663 } else { /* link_down */
1664 DP(NETIF_MSG_LINK, "link down\n");
1665
1666 bp->link_up = 0;
1667
1668 bp->line_speed = 0;
1669 bp->duplex = DUPLEX_FULL;
1670 bp->flow_ctrl = 0;
1671 }
1672
1673 DP(NETIF_MSG_LINK, "gp_status 0x%x link_up %d\n"
1674 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1675 " link_status 0x%x\n",
1676 gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl,
1677 bp->link_status);
1678}
1679
1680static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1681{
1682 int port = bp->port;
1683
1684 /* first reset all status
1685	 * we assume only one line will change at a time */
1686 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1687 (NIG_XGXS0_LINK_STATUS |
1688 NIG_SERDES0_LINK_STATUS |
1689 NIG_STATUS_INTERRUPT_XGXS0_LINK10G));
1690 if (bp->link_up) {
1691 if (is_10g) {
1692 /* Disable the 10G link interrupt
1693 * by writing 1 to the status register
1694 */
1695 DP(NETIF_MSG_LINK, "10G XGXS link up\n");
1696 bnx2x_bits_en(bp,
1697 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1698 NIG_STATUS_INTERRUPT_XGXS0_LINK10G);
1699
1700 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1701 /* Disable the link interrupt
1702 * by writing 1 to the relevant lane
1703 * in the status register
1704 */
1705 DP(NETIF_MSG_LINK, "1G XGXS link up\n");
1706 bnx2x_bits_en(bp,
1707 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1708 ((1 << bp->ser_lane) <<
1709 NIG_XGXS0_LINK_STATUS_SIZE));
1710
1711 } else { /* SerDes */
1712 DP(NETIF_MSG_LINK, "SerDes link up\n");
1713 /* Disable the link interrupt
1714 * by writing 1 to the status register
1715 */
1716 bnx2x_bits_en(bp,
1717 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1718 NIG_SERDES0_LINK_STATUS);
1719 }
1720
1721 } else { /* link_down */
1722 }
1723}
1724
1725static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1726{
1727 u32 ext_phy_type;
1728 u32 ext_phy_addr;
1729 u32 local_phy;
1730 u32 val = 0;
1731 u32 rx_sd, pcs_status;
1732
1733 if (bp->phy_flags & PHY_XGXS_FLAG) {
1734 local_phy = bp->phy_addr;
1735 ext_phy_addr = ((bp->ext_phy_config &
1736 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1737 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1738 bp->phy_addr = (u8)ext_phy_addr;
1739
1740 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1741 switch (ext_phy_type) {
1742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1743 DP(NETIF_MSG_LINK, "XGXS Direct\n");
1744 val = 1;
1745 break;
1746
1747 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1748 DP(NETIF_MSG_LINK, "XGXS 8705\n");
1749 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
1750 EXT_PHY_OPT_LASI_STATUS, &val);
1751 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
1752
1753 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
1754 EXT_PHY_OPT_LASI_STATUS, &val);
1755 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
1756
1757 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1758 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1759 val = (rx_sd & 0x1);
1760 break;
1761
1762 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1763 DP(NETIF_MSG_LINK, "XGXS 8706\n");
1764 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1765 EXT_PHY_OPT_LASI_STATUS, &val);
1766 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
1767
1768 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1769 EXT_PHY_OPT_LASI_STATUS, &val);
1770 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
1771
1772 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1773 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1774 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD,
1775 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
1776 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
1777 " pcs_status 0x%x\n", rx_sd, pcs_status);
1778 /* link is up if both bit 0 of pmd_rx and
1779 * bit 0 of pcs_status are set
1780 */
1781 val = (rx_sd & pcs_status);
1782 break;
1783
1784 default:
1785 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
1786 bp->ext_phy_config);
1787 val = 0;
1788 break;
1789 }
1790 bp->phy_addr = local_phy;
1791
1792 } else { /* SerDes */
1793 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
1794 switch (ext_phy_type) {
1795 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
1796 DP(NETIF_MSG_LINK, "SerDes Direct\n");
1797 val = 1;
1798 break;
1799
1800 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1801 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1802 val = 1;
1803 break;
1804
1805 default:
1806 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
1807 bp->ext_phy_config);
1808 val = 0;
1809 break;
1810 }
1811 }
1812
1813 return val;
1814}
1815
1816static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1817{
1818 int port = bp->port;
1819 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1820 NIG_REG_INGRESS_BMAC0_MEM;
1821 u32 wb_write[2];
1822 u32 val;
1823
1824	DP(NETIF_MSG_LINK, "enabling BigMAC\n");
1825 /* reset and unreset the BigMac */
1826 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1827 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1828 msleep(5);
1829 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1830 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1831
1832 /* enable access for bmac registers */
1833 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
1834
1835 /* XGXS control */
1836 wb_write[0] = 0x3c;
1837 wb_write[1] = 0;
1838 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
1839 wb_write, 2);
1840
1841 /* tx MAC SA */
1842 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
1843 (bp->dev->dev_addr[3] << 16) |
1844 (bp->dev->dev_addr[4] << 8) |
1845 bp->dev->dev_addr[5]);
1846 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
1847 bp->dev->dev_addr[1]);
1848 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
1849 wb_write, 2);
1850
1851 /* tx control */
1852 val = 0xc0;
1853 if (bp->flow_ctrl & FLOW_CTRL_TX)
1854 val |= 0x800000;
1855 wb_write[0] = val;
1856 wb_write[1] = 0;
1857 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
1858
1859 /* set tx mtu */
1860 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
1861 wb_write[1] = 0;
1862 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
1863
1864 /* mac control */
1865 val = 0x3;
1866 if (is_lb) {
1867 val |= 0x4;
1868 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
1869 }
1870 wb_write[0] = val;
1871 wb_write[1] = 0;
1872 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
1873 wb_write, 2);
1874
1875 /* rx control set to don't strip crc */
1876 val = 0x14;
1877 if (bp->flow_ctrl & FLOW_CTRL_RX)
1878 val |= 0x20;
1879 wb_write[0] = val;
1880 wb_write[1] = 0;
1881 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
1882
1883 /* set rx mtu */
1884 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1885 wb_write[1] = 0;
1886 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
1887
1888 /* set cnt max size */
1889 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
1890 wb_write[1] = 0;
1891 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
1892 wb_write, 2);
1893
1894 /* configure safc */
1895 wb_write[0] = 0x1000200;
1896 wb_write[1] = 0;
1897 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1898 wb_write, 2);
1899
1900 /* fix for emulation */
1901 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1902 wb_write[0] = 0xf000;
1903 wb_write[1] = 0;
1904 REG_WR_DMAE(bp,
1905 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1906 wb_write, 2);
1907 }
1908
1909 /* reset old bmac stats */
1910 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
1911
1912 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
1913
1914 /* select XGXS */
1915 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
1916 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
1917
1918 /* disable the NIG in/out to the emac */
1919 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
1920 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
1921 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
1922
1923 /* enable the NIG in/out to the bmac */
1924 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
1925
1926 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
1927 val = 0;
1928 if (bp->flow_ctrl & FLOW_CTRL_TX)
1929 val = 1;
1930 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
1931 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
1932
1933 bp->phy_flags |= PHY_BMAC_FLAG;
1934
1935 bp->stats_state = STATS_STATE_ENABLE;
1936}
1937
1938static void bnx2x_emac_enable(struct bnx2x *bp)
1939{
1940 int port = bp->port;
1941 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1942 u32 val;
1943 int timeout;
1944
1945	DP(NETIF_MSG_LINK, "enabling EMAC\n");
1946 /* reset and unreset the emac core */
1947 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1948 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1949 msleep(5);
1950 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1951 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1952
1953 /* enable emac and not bmac */
1954 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
1955
 1956	/* for Palladium */
1957 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1958 /* Use lane 1 (of lanes 0-3) */
1959 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1960 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1961 }
1962 /* for fpga */
1963 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
1964 /* Use lane 1 (of lanes 0-3) */
1965 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1966 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1967 }
1968 /* ASIC */
1969 else {
1970 if (bp->phy_flags & PHY_XGXS_FLAG) {
1971 DP(NETIF_MSG_LINK, "XGXS\n");
1972 /* select the master lanes (out of 0-3) */
1973 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
1974 bp->ser_lane);
1975 /* select XGXS */
1976 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1977
1978 } else { /* SerDes */
1979 DP(NETIF_MSG_LINK, "SerDes\n");
1980 /* select SerDes */
1981 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1982 }
1983 }
1984
1985 /* enable emac */
1986 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
1987
1988 /* init emac - use read-modify-write */
1989 /* self clear reset */
1990 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1991 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
1992
1993 timeout = 200;
1994 while ((val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE)) &
1995 EMAC_MODE_RESET) {
1996 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
1997 if (!timeout) {
1998 BNX2X_ERR("EMAC timeout!\n");
1999 break;
2000 }
2001 timeout--;
2002 }
2003
2004 /* reset tx part */
2005 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2006
2007 timeout = 200;
2008 while ((val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE)) &
2009 EMAC_TX_MODE_RESET) {
2010 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2011 if (!timeout) {
2012 BNX2X_ERR("EMAC timeout!\n");
2013 break;
2014 }
2015 timeout--;
2016 }
2017
2018 if (CHIP_REV_IS_SLOW(bp)) {
2019 /* config GMII mode */
2020 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2021 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2022
2023 } else { /* ASIC */
2024 /* pause enable/disable */
2025 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2026 EMAC_RX_MODE_FLOW_EN);
2027 if (bp->flow_ctrl & FLOW_CTRL_RX)
2028 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2029 EMAC_RX_MODE_FLOW_EN);
2030
2031 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2032 EMAC_TX_MODE_EXT_PAUSE_EN);
2033 if (bp->flow_ctrl & FLOW_CTRL_TX)
2034 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2035 EMAC_TX_MODE_EXT_PAUSE_EN);
2036 }
2037
2038 /* KEEP_VLAN_TAG, promiscuous */
2039 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2040 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2041 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2042
2043 /* identify magic packets */
2044 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2045 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2046
2047 /* enable emac for jumbo packets */
2048 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2049 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2050 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2051
2052 /* strip CRC */
2053 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2054
2055 val = ((bp->dev->dev_addr[0] << 8) |
2056 bp->dev->dev_addr[1]);
2057 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2058
2059 val = ((bp->dev->dev_addr[2] << 24) |
2060 (bp->dev->dev_addr[3] << 16) |
2061 (bp->dev->dev_addr[4] << 8) |
2062 bp->dev->dev_addr[5]);
2063 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
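 /* Illustration: for a station address of 00:10:18:aa:bb:cc the two
  * writes above yield MAC_MATCH = 0x00000010 (address bytes 0-1) and
  * MAC_MATCH + 4 = 0x18aabbcc (address bytes 2-5).
  */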
2064
2065 /* disable the NIG in/out to the bmac */
2066 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2067 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2068 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2069
2070 /* enable the NIG in/out to the emac */
2071 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2072 val = 0;
2073 if (bp->flow_ctrl & FLOW_CTRL_TX)
2074 val = 1;
2075 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2076 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2077
2078 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2079 /* take the BigMac out of reset */
2080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2081 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2082
2083 /* enable access for bmac registers */
2084 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2085 }
2086
2087 bp->phy_flags |= PHY_EMAC_FLAG;
2088
2089 bp->stats_state = STATS_STATE_ENABLE;
2090}
2091
2092static void bnx2x_emac_program(struct bnx2x *bp)
2093{
2094 u16 mode = 0;
2095 int port = bp->port;
2096
2097 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2098 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2099 (EMAC_MODE_25G_MODE |
2100 EMAC_MODE_PORT_MII_10M |
2101 EMAC_MODE_HALF_DUPLEX));
2102 switch (bp->line_speed) {
2103 case SPEED_10:
2104 mode |= EMAC_MODE_PORT_MII_10M;
2105 break;
2106
2107 case SPEED_100:
2108 mode |= EMAC_MODE_PORT_MII;
2109 break;
2110
2111 case SPEED_1000:
2112 mode |= EMAC_MODE_PORT_GMII;
2113 break;
2114
2115 case SPEED_2500:
2116 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2117 break;
2118
2119 default:
2120 /* 10G not valid for EMAC */
2121 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2122 break;
2123 }
2124
2125 if (bp->duplex == DUPLEX_HALF)
2126 mode |= EMAC_MODE_HALF_DUPLEX;
2127 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2128 mode);
2129
2130 bnx2x_leds_set(bp, bp->line_speed);
2131}
2132
2133static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2134{
2135 u32 lp_up2;
2136 u32 tx_driver;
2137
2138 /* read precomp */
2139 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2140 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2141
2142 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2143 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2144
2145 /* bits [10:7] at lp_up2, positioned at [15:12] */
2146 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2147 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2148 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2149
2150 if ((lp_up2 != 0) &&
2151 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2152 /* replace tx_driver bits [15:12] */
2153 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2154 tx_driver |= lp_up2;
2155 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2156 }
2157}
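/* Worked example (a sketch, assuming PREEMPHASIS mask 0x0780 with
 * shift 7 and TX_DRIVER preemphasis shift 12, matching the
 * "bits [10:7] ... positioned at [15:12]" comment above):
 *	lp_up2 = 0x0280  ->  field = (0x0280 & 0x0780) >> 7 = 0x5
 *	repositioned     ->  0x5 << 12 = 0x5000
 * so bits [15:12] of tx_driver would be replaced with 0x5.
 */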
2158
2159static void bnx2x_pbf_update(struct bnx2x *bp)
2160{
2161 int port = bp->port;
2162 u32 init_crd, crd;
2163 u32 count = 1000;
2164 u32 pause = 0;
2165
2166 /* disable port */
2167 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2168
2169 /* wait for init credit */
2170 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2171 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2172 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2173
2174 while ((init_crd != crd) && count) {
2175 msleep(5);
2176
2177 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2178 count--;
2179 }
2180 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2181 if (init_crd != crd)
2182 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2183
2184 if (bp->flow_ctrl & FLOW_CTRL_RX)
2185 pause = 1;
2186 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2187 if (pause) {
2188 /* update threshold */
2189 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2190 /* update init credit */
2191 init_crd = 778; /* (800-18-4) */
2192
2193 } else {
2194 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2195
2196 /* update threshold */
2197 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2198 /* update init credit */
2199 switch (bp->line_speed) {
2200 case SPEED_10:
2201 case SPEED_100:
2202 case SPEED_1000:
2203 init_crd = thresh + 55 - 22;
2204 break;
2205
2206 case SPEED_2500:
2207 init_crd = thresh + 138 - 22;
2208 break;
2209
2210 case SPEED_10000:
2211 init_crd = thresh + 553 - 22;
2212 break;
2213
2214 default:
2215 BNX2X_ERR("Invalid line_speed 0x%x\n",
2216 bp->line_speed);
2217 break;
2218 }
2219 }
2220 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2221 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2222 bp->line_speed, init_crd);
2223
2224 /* probe the credit changes */
2225 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2226 msleep(5);
2227 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2228
2229 /* enable port */
2230 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2231}
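/* Worked example (assumed header values, which may not match bnx2x.h:
 * ETH_MAX_JUMBO_PACKET_SIZE 9600, ETH_OVREHEAD 28).
 * With RX flow control off:
 *	thresh = (9600 + 28)/16 = 601
 *	10G:          init_crd = 601 + 553 - 22 = 1132
 *	1G and below: init_crd = 601 +  55 - 22 =  634
 * With RX flow control on, thresh is 0 and init_crd is the fixed
 * 778 (800 - 18 - 4) set above.
 */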
2232
2233static void bnx2x_update_mng(struct bnx2x *bp)
2234{
2235 if (!nomcp)
2236 SHMEM_WR(bp, drv_fw_mb[bp->port].link_status,
2237 bp->link_status);
2238}
2239
2240static void bnx2x_link_report(struct bnx2x *bp)
2241{
2242 if (bp->link_up) {
2243 netif_carrier_on(bp->dev);
2244 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2245
2246 printk("%d Mbps ", bp->line_speed);
2247
2248 if (bp->duplex == DUPLEX_FULL)
2249 printk("full duplex");
2250 else
2251 printk("half duplex");
2252
2253 if (bp->flow_ctrl) {
2254 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2255 printk(", receive ");
2256 if (bp->flow_ctrl & FLOW_CTRL_TX)
2257 printk("& transmit ");
2258 } else {
2259 printk(", transmit ");
2260 }
2261 printk("flow control ON");
2262 }
2263 printk("\n");
2264
2265 } else { /* link_down */
2266 netif_carrier_off(bp->dev);
2267 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2268 }
2269}
2270
2271static void bnx2x_link_up(struct bnx2x *bp)
2272{
2273 int port = bp->port;
2274
2275 /* PBF - link up */
2276 bnx2x_pbf_update(bp);
2277
2278 /* disable drain */
2279 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2280
2281 /* update shared memory */
2282 bnx2x_update_mng(bp);
2283
2284 /* indicate link up */
2285 bnx2x_link_report(bp);
2286}
2287
2288static void bnx2x_link_down(struct bnx2x *bp)
2289{
2290 int port = bp->port;
2291
2292 /* notify stats */
2293 if (bp->stats_state != STATS_STATE_DISABLE) {
2294 bp->stats_state = STATS_STATE_STOP;
2295 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2296 }
2297
2298 /* indicate link down */
2299 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2300
2301 /* reset BigMac */
2302 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2303 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2304
2305 /* ignore drain flag interrupt */
2306 /* activate nig drain */
2307 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2308
2309 /* update shared memory */
2310 bnx2x_update_mng(bp);
2311
2312 /* indicate link down */
2313 bnx2x_link_report(bp);
2314}
2315
2316static void bnx2x_init_mac_stats(struct bnx2x *bp);
2317
2318/* This function is called upon link interrupt */
2319static void bnx2x_link_update(struct bnx2x *bp)
2320{
2321 u32 gp_status;
2322 int port = bp->port;
2323 int i;
2324 int link_10g;
2325
2326 DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x,"
2327 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2328 " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG),
2329 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2330 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2331 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2332 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2333 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2334 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2335 );
2336
2337 might_sleep();
2338 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2339 /* avoid fast toggling */
2340 for (i = 0 ; i < 10 ; i++) {
2341 msleep(10);
2342 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2343 &gp_status);
2344 }
2345
2346 bnx2x_link_settings_status(bp, gp_status);
2347
2348 /* anything 10G and over uses the BigMac */
2349 link_10g = ((bp->line_speed >= SPEED_10000) &&
2350 (bp->line_speed <= SPEED_16000));
2351
2352 bnx2x_link_int_ack(bp, link_10g);
2353
2354 /* link is up only if both local phy and external phy are up */
2355 if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) {
2356 if (link_10g) {
2357 bnx2x_bmac_enable(bp, 0);
2358 bnx2x_leds_set(bp, SPEED_10000);
2359
2360 } else {
2361 bnx2x_emac_enable(bp);
2362 bnx2x_emac_program(bp);
2363
2364 /* AN complete? */
2365 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2366 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2367 bnx2x_set_sgmii_tx_driver(bp);
2368 }
2369 }
2370 bnx2x_link_up(bp);
2371
2372 } else { /* link down */
2373 bnx2x_leds_unset(bp);
2374 bnx2x_link_down(bp);
2375 }
2376
2377 bnx2x_init_mac_stats(bp);
2378}
2379
2380/*
2381 * Init service functions
2382 */
2383
2384static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2385{
2386 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2387 (bp->phy_addr + bp->ser_lane) : 0;
2388
2389 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2390 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2391}
2392
2393static void bnx2x_set_master_ln(struct bnx2x *bp)
2394{
2395 u32 new_master_ln;
2396
2397 /* set the master_ln for AN */
2398 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2399 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2400 &new_master_ln);
2401 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2402 (new_master_ln | bp->ser_lane));
2403}
2404
2405static void bnx2x_reset_unicore(struct bnx2x *bp)
2406{
2407 u32 mii_control;
2408 int i;
2409
2410 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2411 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2412 /* reset the unicore */
2413 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2414 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2415
2416 /* wait for the reset to self clear */
2417 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2418 udelay(5);
2419
2420 /* the reset erased the previous bank value */
2421 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2422 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2423 &mii_control);
2424
2425 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2426 udelay(5);
2427 return;
2428 }
2429 }
2430
2431 BNX2X_ERR("BUG! unicore is still in reset!\n");
2432}
2433
2434static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2435{
2436 /* Each two bits represents a lane number:
2437 no swap is 0123 => 0x1b, so there is no need to enable the swap */
2438
2439 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2440 if (bp->rx_lane_swap != 0x1b) {
2441 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2442 (bp->rx_lane_swap |
2443 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2444 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2445 } else {
2446 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2447 }
2448
2449 if (bp->tx_lane_swap != 0x1b) {
2450 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2451 (bp->tx_lane_swap |
2452 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2453 } else {
2454 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2455 }
2456}
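/* Decoding sketch (an assumption based only on the comment above):
 * the register holds four 2-bit lane indices, so the no-swap value
 * 0x1b = 00 01 10 11b encodes the identity map {0,1,2,3}, while e.g.
 * 0xe4 = 11 10 01 00b would describe the fully reversed mapping
 * {3,2,1,0} and would need the swap-enable bit(s) set.
 */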
2457
2458static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2459{
2460 u32 control2;
2461
2462 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2463 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2464 &control2);
2465
2466 if (bp->autoneg & AUTONEG_PARALLEL) {
2467 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2468 } else {
2469 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2470 }
2471 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2472 control2);
2473
2474 if (bp->phy_flags & PHY_XGXS_FLAG) {
2475 DP(NETIF_MSG_LINK, "XGXS\n");
2476 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2477
2478 bnx2x_mdio22_write(bp,
2479 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2480 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2481
2482 bnx2x_mdio22_read(bp,
2483 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2484 &control2);
2485
2486 if (bp->autoneg & AUTONEG_PARALLEL) {
2487 control2 |=
2488 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2489 } else {
2490 control2 &=
2491 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2492 }
2493 bnx2x_mdio22_write(bp,
2494 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2495 control2);
2496 }
2497}
2498
2499static void bnx2x_set_autoneg(struct bnx2x *bp)
2500{
2501 u32 reg_val;
2502
2503 /* CL37 Autoneg */
2504 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2505 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2506 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2507 (bp->autoneg & AUTONEG_CL37)) {
2508 /* CL37 Autoneg Enabled */
2509 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2510 } else {
2511 /* CL37 Autoneg Disabled */
2512 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2513 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2514 }
2515 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2516
2517 /* Enable/Disable Autodetection */
2518 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2519 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2520 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2521
2522 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2523 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2524 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2525 } else {
2526 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2527 }
2528 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2529
2530 /* Enable TetonII and BAM autoneg */
2531 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2532 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2533 &reg_val);
2534 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2535 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2536 /* Enable BAM aneg Mode and TetonII aneg Mode */
2537 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2538 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2539 } else {
2540 /* TetonII and BAM Autoneg Disabled */
2541 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2542 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2543 }
2544 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2545 reg_val);
2546
2547 /* Enable Clause 73 Aneg */
2548 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2549 (bp->autoneg & AUTONEG_CL73)) {
2550 /* Enable BAM Station Manager */
2551 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2552 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2553 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2554 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2555 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2556
2557 /* Merge CL73 and CL37 aneg resolution */
2558 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2559 &reg_val);
2560 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2561 (reg_val |
2562 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2563
2564 /* Set the CL73 AN speed */
2565 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2566 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2567 /* In the SerDes we support only 1G.
2568 In the XGXS we support 10G KX4,
2569 but we do not currently support KR */
2570 if (bp->phy_flags & PHY_XGXS_FLAG) {
2571 DP(NETIF_MSG_LINK, "XGXS\n");
2572 /* 10G KX4 */
2573 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2574 } else {
2575 DP(NETIF_MSG_LINK, "SerDes\n");
2576 /* 1000M KX */
2577 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2578 }
2579 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2580
2581 /* CL73 Autoneg Enabled */
2582 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2583 } else {
2584 /* CL73 Autoneg Disabled */
2585 reg_val = 0;
2586 }
2587 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2588 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2589}
2590
2591/* program SerDes, forced speed */
2592static void bnx2x_program_serdes(struct bnx2x *bp)
2593{
2594 u32 reg_val;
2595
2596 /* program duplex, disable autoneg */
2597 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2598 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2599 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2600 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2601 if (bp->req_duplex == DUPLEX_FULL)
2602 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2603 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2604
2605 /* program speed
2606 - needed only if the speed is greater than 1G (2.5G or 10G) */
2607 if (bp->req_line_speed > SPEED_1000) {
2608 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2609 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2610 /* clearing the speed value before setting the right speed */
2611 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2612 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2613 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2614 if (bp->req_line_speed == SPEED_10000)
2615 reg_val |=
2616 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2617 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2618 }
2619}
2620
2621static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2622{
2623 u32 val = 0;
2624
2625 /* configure the 48 bits for BAM AN */
2626 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2627
2628 /* set extended capabilities */
2629 if (bp->advertising & ADVERTISED_2500baseT_Full)
2630 val |= MDIO_OVER_1G_UP1_2_5G;
2631 if (bp->advertising & ADVERTISED_10000baseT_Full)
2632 val |= MDIO_OVER_1G_UP1_10G;
2633 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
2634
2635 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
2636}
2637
2638static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
2639{
2640 u32 an_adv;
2641
2642 /* for AN, we are always publishing full duplex */
2643 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2644
2645 /* set pause */
2646 switch (bp->pause_mode) {
2647 case PAUSE_SYMMETRIC:
2648 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2649 break;
2650 case PAUSE_ASYMMETRIC:
2651 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2652 break;
2653 case PAUSE_BOTH:
2654 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2655 break;
2656 case PAUSE_NONE:
2657 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
2658 break;
2659 }
2660
2661 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2662 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
2663}
2664
2665static void bnx2x_restart_autoneg(struct bnx2x *bp)
2666{
2667 if (bp->autoneg & AUTONEG_CL73) {
2668 /* enable and restart clause 73 aneg */
2669 u32 an_ctrl;
2670
2671 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2672 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2673 &an_ctrl);
2674 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2675 (an_ctrl |
2676 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2677 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2678
2679 } else {
2680 /* Enable and restart BAM/CL37 aneg */
2681 u32 mii_control;
2682
2683 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2684 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2685 &mii_control);
2686 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2687 (mii_control |
2688 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2689 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2690 }
2691}
2692
2693static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
2694{
2695 u32 control1;
2696
2697 /* in SGMII mode, the unicore is always slave */
2698 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2699 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2700 &control1);
2701 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2702 /* set sgmii mode (and not fiber) */
2703 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2704 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2705 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2706 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2707 control1);
2708
2709 /* if forced speed */
2710 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2711 /* set speed, disable autoneg */
2712 u32 mii_control;
2713
2714 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2715 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2716 &mii_control);
2717 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2718 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
2719 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
2720
2721 switch (bp->req_line_speed) {
2722 case SPEED_100:
2723 mii_control |=
2724 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
2725 break;
2726 case SPEED_1000:
2727 mii_control |=
2728 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
2729 break;
2730 case SPEED_10:
2731 /* there is nothing to set for 10M */
2732 break;
2733 default:
2734 /* invalid speed for SGMII */
2735 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
2736 bp->req_line_speed);
2737 break;
2738 }
2739
2740 /* setting the full duplex */
2741 if (bp->req_duplex == DUPLEX_FULL)
2742 mii_control |=
2743 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2744 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2745 mii_control);
2746
2747 } else { /* AN mode */
2748 /* enable and restart AN */
2749 bnx2x_restart_autoneg(bp);
2750 }
2751}
2752
2753static void bnx2x_link_int_enable(struct bnx2x *bp)
2754{
2755 int port = bp->port;
2756
2757 /* setting the status to report on link up
2758 for either XGXS or SerDes */
2759 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2760 (NIG_XGXS0_LINK_STATUS |
2761 NIG_STATUS_INTERRUPT_XGXS0_LINK10G |
2762 NIG_SERDES0_LINK_STATUS));
2763
2764 if (bp->phy_flags & PHY_XGXS_FLAG) {
2765 /* TBD -
2766 * in force mode (not AN) we can enable just the relevant
2767 * interrupt
2768 * Even in AN we might enable only one according to the AN
2769 * speed mask
2770 */
2771 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2772 (NIG_MASK_XGXS0_LINK_STATUS |
2773 NIG_MASK_XGXS0_LINK10G));
2774 DP(NETIF_MSG_LINK, "enable XGXS interrupt\n");
2775
2776 } else { /* SerDes */
2777 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2778 NIG_MASK_SERDES0_LINK_STATUS);
2779 DP(NETIF_MSG_LINK, "enable SerDes interrupt\n");
2780 }
2781}
2782
2783static void bnx2x_ext_phy_init(struct bnx2x *bp)
2784{
2785 int port = bp->port;
2786 u32 ext_phy_type;
2787 u32 ext_phy_addr;
2788 u32 local_phy;
2789
2790 if (bp->phy_flags & PHY_XGXS_FLAG) {
2791 local_phy = bp->phy_addr;
2792 ext_phy_addr = ((bp->ext_phy_config &
2793 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2794 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2795
2796 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2797 switch (ext_phy_type) {
2798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2799 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2800 break;
2801
2802 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2803 DP(NETIF_MSG_LINK, "XGXS 8705\n");
2804 bnx2x_bits_en(bp,
2805 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2806 NIG_MASK_MI_INT);
2807 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2808
2809 bp->phy_addr = ext_phy_type;
2810 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2811 EXT_PHY_OPT_PMD_MISC_CNTL,
2812 0x8288);
2813 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2814 EXT_PHY_OPT_PHY_IDENTIFIER,
2815 0x7fbf);
2816 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2817 EXT_PHY_OPT_CMU_PLL_BYPASS,
2818 0x0100);
2819 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD,
2820 EXT_PHY_OPT_LASI_CNTL, 0x1);
2821 break;
2822
2823 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2824 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2825 bnx2x_bits_en(bp,
2826 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2827 NIG_MASK_MI_INT);
2828 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2829
2830 bp->phy_addr = ext_phy_type;
2831 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2832 EXT_PHY_OPT_PMD_DIGITAL_CNT,
2833 0x400);
2834 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2835 EXT_PHY_OPT_LASI_CNTL, 0x1);
2836 break;
2837
2838 default:
2839 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2840 bp->ext_phy_config);
2841 break;
2842 }
2843 bp->phy_addr = local_phy;
2844
2845 } else { /* SerDes */
2846/* ext_phy_addr = ((bp->ext_phy_config &
2847 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2848 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2849*/
2850 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2851 switch (ext_phy_type) {
2852 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2853 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2854 break;
2855
2856 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2857 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2858 bnx2x_bits_en(bp,
2859 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2860 NIG_MASK_MI_INT);
2861 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2862 break;
2863
2864 default:
2865 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2866 bp->ext_phy_config);
2867 break;
2868 }
2869 }
2870}
2871
2872static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2873{
2874 u32 ext_phy_type;
2875 u32 ext_phy_addr;
2876 u32 local_phy;
2877
2878 if (bp->phy_flags & PHY_XGXS_FLAG) {
2879 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2880 switch (ext_phy_type) {
2881 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2882 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2883 break;
2884
2885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2886 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2887 DP(NETIF_MSG_LINK, "XGXS 8705/6\n");
2888 local_phy = bp->phy_addr;
2889 ext_phy_addr = ((bp->ext_phy_config &
2890 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2891 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2892 bp->phy_addr = (u8)ext_phy_addr;
2893 bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2894 EXT_PHY_OPT_CNTL, 0xa040);
2895 bp->phy_addr = local_phy;
2896 break;
2897
2898 default:
2899 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2900 bp->ext_phy_config);
2901 break;
2902 }
2903
2904 } else { /* SerDes */
2905 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2906 switch (ext_phy_type) {
2907 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2908 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2909 break;
2910
2911 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2912 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2913 break;
2914
2915 default:
2916 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2917 bp->ext_phy_config);
2918 break;
2919 }
2920 }
2921}
2922
2923static void bnx2x_link_initialize(struct bnx2x *bp)
2924{
2925 int port = bp->port;
2926
2927 /* disable attentions */
2928 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2929 (NIG_MASK_XGXS0_LINK_STATUS |
2930 NIG_MASK_XGXS0_LINK10G |
2931 NIG_MASK_SERDES0_LINK_STATUS |
2932 NIG_MASK_MI_INT));
2933
2934 bnx2x_ext_phy_reset(bp);
2935
2936 bnx2x_set_aer_mmd(bp);
2937
2938 if (bp->phy_flags & PHY_XGXS_FLAG)
2939 bnx2x_set_master_ln(bp);
2940
2941 /* reset the SerDes and wait for reset bit return low */
2942 bnx2x_reset_unicore(bp);
2943
2944 bnx2x_set_aer_mmd(bp);
2945
2946 /* setting the masterLn_def again after the reset */
2947 if (bp->phy_flags & PHY_XGXS_FLAG) {
2948 bnx2x_set_master_ln(bp);
2949 bnx2x_set_swap_lanes(bp);
2950 }
2951
2952 /* Set Parallel Detect */
2953 if (bp->req_autoneg & AUTONEG_SPEED)
2954 bnx2x_set_parallel_detection(bp);
2955
2956 if (bp->phy_flags & PHY_XGXS_FLAG) {
2957 if (bp->req_line_speed &&
2958 bp->req_line_speed < SPEED_1000) {
2959 bp->phy_flags |= PHY_SGMII_FLAG;
2960 } else {
2961 bp->phy_flags &= ~PHY_SGMII_FLAG;
2962 }
2963 }
2964
2965 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
2966 u16 bank, rx_eq;
2967
2968 rx_eq = ((bp->serdes_config &
2969 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
2970 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
2971
2972 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
2973 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
2974 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
2975 MDIO_SET_REG_BANK(bp, bank);
2976 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
2977 ((rx_eq &
2978 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
2979 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
2980 }
2981
2982 /* forced speed requested? */
2983 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2984 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2985
2986 /* disable autoneg */
2987 bnx2x_set_autoneg(bp);
2988
2989 /* program speed and duplex */
2990 bnx2x_program_serdes(bp);
2991
2992 } else { /* AN_mode */
2993 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2994
2995 /* AN enabled */
2996 bnx2x_set_brcm_cl37_advertisment(bp);
2997
2998 /* program duplex & pause advertisement (for aneg) */
2999 bnx2x_set_ieee_aneg_advertisment(bp);
3000
3001 /* enable autoneg */
3002 bnx2x_set_autoneg(bp);
3003
3004 /* enable and restart AN */
3005 bnx2x_restart_autoneg(bp);
3006 }
3007
3008 } else { /* SGMII mode */
3009 DP(NETIF_MSG_LINK, "SGMII\n");
3010
3011 bnx2x_initialize_sgmii_process(bp);
3012 }
3013
3014 /* enable the interrupt */
3015 bnx2x_link_int_enable(bp);
3016
3017 /* init ext phy and enable link state int */
3018 bnx2x_ext_phy_init(bp);
3019}
3020
3021static void bnx2x_phy_deassert(struct bnx2x *bp)
3022{
3023 int port = bp->port;
3024 u32 val;
3025
3026 if (bp->phy_flags & PHY_XGXS_FLAG) {
3027 DP(NETIF_MSG_LINK, "XGXS\n");
3028 val = XGXS_RESET_BITS;
3029
3030 } else { /* SerDes */
3031 DP(NETIF_MSG_LINK, "SerDes\n");
3032 val = SERDES_RESET_BITS;
3033 }
3034
3035 val = val << (port*16);
3036
3037 /* reset and unreset the SerDes/XGXS */
3038 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3039 msleep(5);
3040 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3041}
3042
3043static int bnx2x_phy_init(struct bnx2x *bp)
3044{
3045 DP(NETIF_MSG_LINK, "started\n");
3046 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3047 bp->phy_flags |= PHY_EMAC_FLAG;
3048 bp->link_up = 1;
3049 bp->line_speed = SPEED_10000;
3050 bp->duplex = DUPLEX_FULL;
3051 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3052 bnx2x_emac_enable(bp);
3053 bnx2x_link_report(bp);
3054 return 0;
3055
3056 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3057 bp->phy_flags |= PHY_BMAC_FLAG;
3058 bp->link_up = 1;
3059 bp->line_speed = SPEED_10000;
3060 bp->duplex = DUPLEX_FULL;
3061 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3062 bnx2x_bmac_enable(bp, 0);
3063 bnx2x_link_report(bp);
3064 return 0;
3065
3066 } else {
3067 bnx2x_phy_deassert(bp);
3068 bnx2x_link_initialize(bp);
3069 }
3070
3071 return 0;
3072}
3073
3074static void bnx2x_link_reset(struct bnx2x *bp)
3075{
3076 int port = bp->port;
3077
3078 /* disable attentions */
3079 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3080 (NIG_MASK_XGXS0_LINK_STATUS |
3081 NIG_MASK_XGXS0_LINK10G |
3082 NIG_MASK_SERDES0_LINK_STATUS |
3083 NIG_MASK_MI_INT));
3084
3085 bnx2x_ext_phy_reset(bp);
3086
3087 /* reset the SerDes/XGXS */
3088 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3089 (0x1ff << (port*16)));
3090
3091 /* reset EMAC / BMAC and disable NIG interfaces */
3092 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3093 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3094
3095 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3096 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3097 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3098
3099 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3100}
3101
3102#ifdef BNX2X_XGXS_LB
3103static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3104{
3105 int port = bp->port;
3106
3107 if (is_10g) {
3108 u32 md_devad;
3109
3110 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3111
3112 /* change the uni_phy_addr in the nig */
3113 md_devad = REG_RD(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD +
3114 port*0x18);
3115 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3116
3117 /* change the aer mmd */
3118 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3119 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3120
3121 /* config combo IEEE0 control reg for loopback */
3122 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3123 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3124 0x6041);
3125
3126 /* set aer mmd back */
3127 bnx2x_set_aer_mmd(bp);
3128
3129 /* and md_devad */
3130 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3131
3132 } else {
3133 u32 mii_control;
3134
3135 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3136
3137 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3138 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3139 &mii_control);
3140 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3141 (mii_control |
3142 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3143 }
3144}
3145#endif
3146
3147/* end of PHY/MAC */
3148
3149/* slow path */
3150
3151/*
3152 * General service functions
3153 */
3154
3155/* the slow path queue is odd since completions arrive on the fastpath ring */
3156static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3157 u32 data_hi, u32 data_lo, int common)
3158{
3159 int port = bp->port;
3160
3161 DP(NETIF_MSG_TIMER,
3162 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3163 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3164 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3165 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3166
3167#ifdef BNX2X_STOP_ON_ERROR
3168 if (unlikely(bp->panic))
3169 return -EIO;
3170#endif
3171
3172 spin_lock(&bp->spq_lock);
3173
3174 if (!bp->spq_left) {
3175 BNX2X_ERR("BUG! SPQ ring full!\n");
3176 spin_unlock(&bp->spq_lock);
3177 bnx2x_panic();
3178 return -EBUSY;
3179 }
3180 /* CID needs port number to be encoded in it */
3181 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3182 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3183 HW_CID(bp, cid)));
3184 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3185 if (common)
3186 bp->spq_prod_bd->hdr.type |=
3187 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3188
3189 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3190 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3191
3192 bp->spq_left--;
3193
3194 if (bp->spq_prod_bd == bp->spq_last_bd) {
3195 bp->spq_prod_bd = bp->spq;
3196 bp->spq_prod_idx = 0;
3197 DP(NETIF_MSG_TIMER, "end of spq\n");
3198
3199 } else {
3200 bp->spq_prod_bd++;
3201 bp->spq_prod_idx++;
3202 }
3203
3204 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
3205 bp->spq_prod_idx);
3206
3207 spin_unlock(&bp->spq_lock);
3208 return 0;
3209}
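/* Usage sketch (hypothetical command/cid values):
 *
 *	rc = bnx2x_sp_post(bp, command, cid, 0, 0, 1);
 *
 * posts a common ramrod with no data.  The function only queues the
 * element and advances spq_prod_idx; the caller must wait for the
 * matching completion, which arrives on the fastpath ring.
 */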
3210
3211/* acquire split MCP access lock register */
3212static int bnx2x_lock_alr(struct bnx2x *bp)
3213{
3214 int rc = 0;
3215 u32 i, j, val;
3216
3217 might_sleep();
3218 i = 100;
3219 for (j = 0; j < i*10; j++) {
3220 val = (1UL << 31);
3221 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3222 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3223 if (val & (1L << 31))
3224 break;
3225
3226 msleep(5);
3227 }
3228
3229 if (!(val & (1L << 31))) {
3230 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3231
3232 rc = -EBUSY;
3233 }
3234
3235 return rc;
3236}
3237
3238/* Release split MCP access lock register */
3239static void bnx2x_unlock_alr(struct bnx2x *bp)
3240{
3241 u32 val = 0;
3242
3243 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3244}
3245
3246static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3247{
3248 struct host_def_status_block *def_sb = bp->def_status_blk;
3249 u16 rc = 0;
3250
3251 barrier(); /* status block is written to by the chip */
3252
3253 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3254 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3255 rc |= 1;
3256 }
3257 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
3258 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
3259 rc |= 2;
3260 }
3261 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
3262 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
3263 rc |= 4;
3264 }
3265 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
3266 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
3267 rc |= 8;
3268 }
3269 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
3270 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
3271 rc |= 16;
3272 }
3273 return rc;
3274}
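/* The return value is a bitmask of which def status block indices
 * moved:
 *	bit 0 - attention bits index	bit 1 - CStorm
 *	bit 2 - UStorm			bit 3 - XStorm
 *	bit 4 - TStorm
 * e.g. bnx2x_sp_task() below tests bit 0 for HW attentions and
 * bit 1 for CStorm events.
 */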
3275
3276/*
3277 * slow path service functions
3278 */
3279
3280static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3281{
3282 int port = bp->port;
3283 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
3284 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3285 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3286 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3287 NIG_REG_MASK_INTERRUPT_PORT0;
3288
3289 if (~bp->aeu_mask & (asserted & 0xff))
3290 BNX2X_ERR("IGU ERROR\n");
3291 if (bp->attn_state & asserted)
3292 BNX2X_ERR("IGU ERROR\n");
3293
3294 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3295 bp->aeu_mask, asserted);
3296 bp->aeu_mask &= ~(asserted & 0xff);
3297 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
3298
3299 REG_WR(bp, aeu_addr, bp->aeu_mask);
3300
3301 bp->attn_state |= asserted;
3302
3303 if (asserted & ATTN_HARD_WIRED_MASK) {
3304 if (asserted & ATTN_NIG_FOR_FUNC) {
3305 u32 nig_status_port;
3306 u32 nig_int_addr = port ?
3307 NIG_REG_STATUS_INTERRUPT_PORT1 :
3308 NIG_REG_STATUS_INTERRUPT_PORT0;
3309
3310 bp->nig_mask = REG_RD(bp, nig_mask_addr);
3311 REG_WR(bp, nig_mask_addr, 0);
3312
3313 nig_status_port = REG_RD(bp, nig_int_addr);
3314 bnx2x_link_update(bp);
3315
3316 /* handle unicore attn? */
3317 }
3318 if (asserted & ATTN_SW_TIMER_4_FUNC)
3319 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3320
3321 if (asserted & GPIO_2_FUNC)
3322 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3323
3324 if (asserted & GPIO_3_FUNC)
3325 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3326
3327 if (asserted & GPIO_4_FUNC)
3328 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3329
3330 if (port == 0) {
3331 if (asserted & ATTN_GENERAL_ATTN_1) {
3332 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3333 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3334 }
3335 if (asserted & ATTN_GENERAL_ATTN_2) {
3336 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3337 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3338 }
3339 if (asserted & ATTN_GENERAL_ATTN_3) {
3340 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3341 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3342 }
3343 } else {
3344 if (asserted & ATTN_GENERAL_ATTN_4) {
3345 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3346 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3347 }
3348 if (asserted & ATTN_GENERAL_ATTN_5) {
3349 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3350 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3351 }
3352 if (asserted & ATTN_GENERAL_ATTN_6) {
3353 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3354 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3355 }
3356 }
3357
3358 } /* if hardwired */
3359
3360 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
3361 asserted, BAR_IGU_INTMEM + igu_addr);
3362 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
3363
3364 /* now set back the mask */
3365 if (asserted & ATTN_NIG_FOR_FUNC)
3366 REG_WR(bp, nig_mask_addr, bp->nig_mask);
3367}
3368
3369static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3370{
3371 int port = bp->port;
3372 int index;
3373 struct attn_route attn;
3374 struct attn_route group_mask;
3375 u32 reg_addr;
3376 u32 val;
3377
3378 /* need to take HW lock because MCP or other port might also
3379 try to handle this event */
3380 bnx2x_lock_alr(bp);
3381
3382 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3383 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3384 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3385 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3386 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
3387
3388 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3389 if (deasserted & (1 << index)) {
3390 group_mask = bp->attn_group[index];
3391
3392 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
3393 (unsigned long long)group_mask.sig[0]);
3394
3395 if (attn.sig[3] & group_mask.sig[3] &
3396 EVEREST_GEN_ATTN_IN_USE_MASK) {
3397
3398 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
3399
3400 BNX2X_ERR("MC assert!\n");
3401 bnx2x_panic();
3402
3403 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
3404
3405 BNX2X_ERR("MCP assert!\n");
3406 REG_WR(bp,
3407 MISC_REG_AEU_GENERAL_ATTN_11, 0);
3408 bnx2x_mc_assert(bp);
3409
3410 } else {
3411 BNX2X_ERR("UNKNOWN HW ASSERT!\n");
3412 }
3413 }
3414
3415 if (attn.sig[1] & group_mask.sig[1] &
3416 BNX2X_DOORQ_ASSERT) {
3417
3418 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3419 BNX2X_ERR("DB hw attention 0x%x\n", val);
3420 /* DORQ discard attention */
3421 if (val & 0x2)
3422 BNX2X_ERR("FATAL error from DORQ\n");
3423 }
3424
3425 if (attn.sig[2] & group_mask.sig[2] &
3426 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3427
3428 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3429 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3430 /* CFC error attention */
3431 if (val & 0x2)
3432 BNX2X_ERR("FATAL error from CFC\n");
3433 }
3434
3435 if (attn.sig[2] & group_mask.sig[2] &
3436 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3437
3438 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3439 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3440 /* RQ_USDMDP_FIFO_OVERFLOW */
3441 if (val & 0x18000)
3442 BNX2X_ERR("FATAL error from PXP\n");
3443 }
3444
3445 if (attn.sig[3] & group_mask.sig[3] &
3446 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3447
3448 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
3449 0x7ff);
3450 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
3451 attn.sig[3]);
3452 }
3453
3454 if ((attn.sig[0] & group_mask.sig[0] &
3455 HW_INTERRUT_ASSERT_SET_0) ||
3456 (attn.sig[1] & group_mask.sig[1] &
3457 HW_INTERRUT_ASSERT_SET_1) ||
3458 (attn.sig[2] & group_mask.sig[2] &
3459 HW_INTERRUT_ASSERT_SET_2))
3460 BNX2X_ERR("FATAL HW block attention\n");
3461
3462 if ((attn.sig[0] & group_mask.sig[0] &
3463 HW_PRTY_ASSERT_SET_0) ||
3464 (attn.sig[1] & group_mask.sig[1] &
3465 HW_PRTY_ASSERT_SET_1) ||
3466 (attn.sig[2] & group_mask.sig[2] &
3467 HW_PRTY_ASSERT_SET_2))
3468 BNX2X_ERR("FATAL HW block parity attention\n");
3469 }
3470 }
3471
3472 bnx2x_unlock_alr(bp);
3473
3474 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
3475
3476 val = ~deasserted;
3477/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
3478 val, BAR_IGU_INTMEM + reg_addr); */
3479 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
3480
3481 if (bp->aeu_mask & (deasserted & 0xff))
3482 BNX2X_ERR("IGU BUG\n");
3483 if (~bp->attn_state & deasserted)
3484 BNX2X_ERR("IGU BUG\n");
3485
3486 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3487 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3488
3489 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
3490 bp->aeu_mask |= (deasserted & 0xff);
3491
3492 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
3493 REG_WR(bp, reg_addr, bp->aeu_mask);
3494
3495 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3496 bp->attn_state &= ~deasserted;
3497 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3498}
3499
3500static void bnx2x_attn_int(struct bnx2x *bp)
3501{
3502 /* read local copy of bits */
3503 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
3504 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
3505 u32 attn_state = bp->attn_state;
3506
3507 /* look for changed bits */
3508 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3509 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3510
3511 DP(NETIF_MSG_HW,
3512 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3513 attn_bits, attn_ack, asserted, deasserted);
3514
3515 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3516 BNX2X_ERR("bad attention state\n");
3517
3518 /* handle bits that were raised */
3519 if (asserted)
3520 bnx2x_attn_int_asserted(bp, asserted);
3521
3522 if (deasserted)
3523 bnx2x_attn_int_deasserted(bp, deasserted);
3524}
3525
3526static void bnx2x_sp_task(struct work_struct *work)
3527{
3528 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
3529 u16 status;
3530
3531 /* Return here if interrupt is disabled */
3532 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3533 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3534 return;
3535 }
3536
3537 status = bnx2x_update_dsb_idx(bp);
3538 if (status == 0)
3539 BNX2X_ERR("spurious slowpath interrupt!\n");
3540
3541 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3542
3543 if (status & 0x1) {
3544 /* HW attentions */
3545 bnx2x_attn_int(bp);
3546 }
3547
3548 /* CStorm events: query_stats, cfc delete ramrods */
3549 if (status & 0x2)
3550 bp->stat_pending = 0;
3551
3552 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
3553 IGU_INT_NOP, 1);
3554 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3555 IGU_INT_NOP, 1);
3556 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3557 IGU_INT_NOP, 1);
3558 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3559 IGU_INT_NOP, 1);
3560 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3561 IGU_INT_ENABLE, 1);
3562}
3563
3564static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3565{
3566 struct net_device *dev = dev_instance;
3567 struct bnx2x *bp = netdev_priv(dev);
3568
3569 /* Return here if interrupt is disabled */
3570 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3571 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3572 return IRQ_HANDLED;
3573 }
3574
3575 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
3576
3577#ifdef BNX2X_STOP_ON_ERROR
3578 if (unlikely(bp->panic))
3579 return IRQ_HANDLED;
3580#endif
3581
3582 schedule_work(&bp->sp_task);
3583
3584 return IRQ_HANDLED;
3585}
3586
3587/* end of slow path */
3588
3589/* Statistics */
3590
3591/****************************************************************************
3592* Macros
3593****************************************************************************/
3594
3595#define UPDATE_STAT(s, t) \
3596 do { \
3597 estats->t += new->s - old->s; \
3598 old->s = new->s; \
3599 } while (0)
3600
3601/* sum[hi:lo] += add[hi:lo] */
3602#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3603 do { \
3604 s_lo += a_lo; \
3605 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3606 } while (0)
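/* Example: ADD_64(s_hi, 0, s_lo, 1) on the 64-bit value
 * 0x00000000_ffffffff wraps s_lo to 0 and carries into s_hi,
 * giving 0x00000001_00000000.
 */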
3607
3608/* difference = minuend - subtrahend */
3609#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3610 do { \
3611 if (m_lo < s_lo) { /* underflow */ \
3612 d_hi = m_hi - s_hi; \
3613 if (d_hi > 0) { /* we can 'loan' 1 */ \
3614 d_hi--; \
3615 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3616 } else { /* m_hi <= s_hi */ \
3617 d_hi = 0; \
3618 d_lo = 0; \
3619 } \
3620 } else { /* m_lo >= s_lo */ \
3621 if (m_hi < s_hi) { \
3622 d_hi = 0; \
3623 d_lo = 0; \
3624 } else { /* m_hi >= s_hi */ \
3625 d_hi = m_hi - s_hi; \
3626 d_lo = m_lo - s_lo; \
3627 } \
3628 } \
3629 } while (0)
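/* Example: DIFF_64(d_hi, 1, 0, d_lo, 0, 1), i.e.
 * 0x1_00000000 - 0x0_00000001: m_lo < s_lo so 1 is "loaned" from
 * d_hi, giving d_hi = 0, d_lo = 0xffffffff.  A negative difference
 * clamps to 0:0, which suits monotonic HW counters.
 */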
3630
3631/* minuend -= subtrahend */
3632#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3633 do { \
3634 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3635 } while (0)
3636
3637#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
3638 do { \
3639 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
3640 diff.lo, new->s_lo, old->s_lo); \
3641 old->s_hi = new->s_hi; \
3642 old->s_lo = new->s_lo; \
3643 ADD_64(estats->t_hi, diff.hi, \
3644 estats->t_lo, diff.lo); \
3645 } while (0)
3646
3647/* sum[hi:lo] += add */
3648#define ADD_EXTEND_64(s_hi, s_lo, a) \
3649 do { \
3650 s_lo += a; \
3651 s_hi += (s_lo < a) ? 1 : 0; \
3652 } while (0)
3653
3654#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
3655 do { \
3656 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
3657 } while (0)
3658
3659#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
3660 do { \
3661 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3662 old_tclient->s = le32_to_cpu(tclient->s); \
3663 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
3664 } while (0)
3665
3666/*
3667 * General service functions
3668 */
3669
3670static inline long bnx2x_hilo(u32 *hiref)
3671{
3672 u32 lo = *(hiref + 1);
3673#if (BITS_PER_LONG == 64)
3674 u32 hi = *hiref;
3675
3676 return HILO_U64(hi, lo);
3677#else
3678 return lo;
3679#endif
3680}
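/* Example (assuming HILO_U64 concatenates hi:lo): with *hiref = 0x1
 * and *(hiref + 1) = 0x2, a 64-bit kernel gets 0x100000002, while a
 * 32-bit kernel returns only the low half, 0x2.
 */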
3681
3682/*
3683 * Init service functions
3684 */
3685
3686static void bnx2x_init_mac_stats(struct bnx2x *bp)
3687{
3688 struct dmae_command *dmae;
3689 int port = bp->port;
3690 int loader_idx = port * 8;
3691 u32 opcode;
3692 u32 mac_addr;
3693
3694 bp->executer_idx = 0;
3695 if (bp->fw_mb) {
3696 /* MCP */
3697 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3698 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3699#ifdef __BIG_ENDIAN
3700 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3701#else
3702 DMAE_CMD_ENDIANITY_DW_SWAP |
3703#endif
3704 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3705
3706 if (bp->link_up)
3707 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
3708
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
3712 sizeof(u32));
3713 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
3714 sizeof(u32));
3715 dmae->dst_addr_lo = bp->fw_mb >> 2;
3716 dmae->dst_addr_hi = 0;
3717 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
3718 sizeof(u32)) >> 2;
3719 if (bp->link_up) {
3720 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3721 dmae->comp_addr_hi = 0;
3722 dmae->comp_val = 1;
3723 } else {
3724 dmae->comp_addr_lo = 0;
3725 dmae->comp_addr_hi = 0;
3726 dmae->comp_val = 0;
3727 }
3728 }
3729
3730 if (!bp->link_up) {
3731 /* no need to collect statistics in link down */
3732 return;
3733 }
3734
3735 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3736 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3737 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3738#ifdef __BIG_ENDIAN
3739 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3740#else
3741 DMAE_CMD_ENDIANITY_DW_SWAP |
3742#endif
3743 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3744
3745 if (bp->phy_flags & PHY_BMAC_FLAG) {
3746
3747 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3748 NIG_REG_INGRESS_BMAC0_MEM);
3749
3750 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3751 BIGMAC_REGISTER_TX_STAT_GTBYT */
3752 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753 dmae->opcode = opcode;
3754 dmae->src_addr_lo = (mac_addr +
3755 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3756 dmae->src_addr_hi = 0;
3757 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3758 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3759 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3760 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3761 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3762 dmae->comp_addr_hi = 0;
3763 dmae->comp_val = 1;
3764
3765 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3766 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3767 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3768 dmae->opcode = opcode;
3769 dmae->src_addr_lo = (mac_addr +
3770 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3771 dmae->src_addr_hi = 0;
3772 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3773 offsetof(struct bmac_stats, rx_gr64));
3774 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3775 offsetof(struct bmac_stats, rx_gr64));
3776 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3777 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3778 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3779 dmae->comp_addr_hi = 0;
3780 dmae->comp_val = 1;
3781
3782 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
3783
3784 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3785
3786 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3787 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3788 dmae->opcode = opcode;
3789 dmae->src_addr_lo = (mac_addr +
3790 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3791 dmae->src_addr_hi = 0;
3792 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3793 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3794 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3795 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3796 dmae->comp_addr_hi = 0;
3797 dmae->comp_val = 1;
3798
3799 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3800 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3801 dmae->opcode = opcode;
3802 dmae->src_addr_lo = (mac_addr +
3803 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3804 dmae->src_addr_hi = 0;
3805 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3806 offsetof(struct emac_stats,
3807 rx_falsecarriererrors));
3808 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3809 offsetof(struct emac_stats,
3810 rx_falsecarriererrors));
3811 dmae->len = 1;
3812 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3813 dmae->comp_addr_hi = 0;
3814 dmae->comp_val = 1;
3815
3816 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3817 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3818 dmae->opcode = opcode;
3819 dmae->src_addr_lo = (mac_addr +
3820 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3821 dmae->src_addr_hi = 0;
3822 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3823 offsetof(struct emac_stats,
3824 tx_ifhcoutoctets));
3825 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3826 offsetof(struct emac_stats,
3827 tx_ifhcoutoctets));
3828 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3829 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3830 dmae->comp_addr_hi = 0;
3831 dmae->comp_val = 1;
3832 }
3833
3834 /* NIG */
3835 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3836 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3837 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3838 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3839#ifdef __BIG_ENDIAN
3840 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3841#else
3842 DMAE_CMD_ENDIANITY_DW_SWAP |
3843#endif
3844 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3845 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3846 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3847 dmae->src_addr_hi = 0;
3848 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
3849 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
3850 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
3851 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
3852 offsetof(struct nig_stats, done));
3853 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
3854 offsetof(struct nig_stats, done));
3855 dmae->comp_val = 0xffffffff;
3856}
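/* Note on the pattern above: each bnx2x_sp(bp, dmae[...]) entry is
 * one DMAE command copying len dwords between GRC registers and host
 * memory.  comp_addr and comp_val appear to chain the commands:
 * completing with 1 into a dmae_reg_go_c[] register kicks the next
 * channel, while the final NIG command completes into the nig_stats
 * done field with 0xffffffff so the driver can poll for the end of
 * the batch.
 */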
3857
3858static void bnx2x_init_stats(struct bnx2x *bp)
3859{
3860 int port = bp->port;
3861
3862 bp->stats_state = STATS_STATE_DISABLE;
3863 bp->executer_idx = 0;
3864
3865 bp->old_brb_discard = REG_RD(bp,
3866 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3867
3868 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
3869 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3870 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3871
3872 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
3873 REG_WR(bp, BAR_XSTRORM_INTMEM +
3874 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3875
3876 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
3877 REG_WR(bp, BAR_TSTRORM_INTMEM +
3878 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3879
3880 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
3881 REG_WR(bp, BAR_CSTRORM_INTMEM +
3882 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3883
3884 REG_WR(bp, BAR_XSTRORM_INTMEM +
3885 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
3886 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3887 REG_WR(bp, BAR_XSTRORM_INTMEM +
3888 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
3889 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3890
3891 REG_WR(bp, BAR_TSTRORM_INTMEM +
3892 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
3893 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3894 REG_WR(bp, BAR_TSTRORM_INTMEM +
3895 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
3896 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3897}
3898
3899static void bnx2x_stop_stats(struct bnx2x *bp)
3900{
3901 might_sleep();
3902 if (bp->stats_state != STATS_STATE_DISABLE) {
3903 int timeout = 10;
3904
3905 bp->stats_state = STATS_STATE_STOP;
3906 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
3907
3908 while (bp->stats_state != STATS_STATE_DISABLE) {
3909 if (!timeout) {
3910 				BNX2X_ERR("timeout waiting for stats stop\n");
3911 break;
3912 }
3913 timeout--;
3914 msleep(100);
3915 }
3916 }
3917 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
3918}
3919
3920/*
3921 * Statistics service functions
3922 */
3923
3924static void bnx2x_update_bmac_stats(struct bnx2x *bp)
3925{
3926 struct regp diff;
3927 struct regp sum;
3928 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
3929 struct bmac_stats *old = &bp->old_bmac;
3930 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3931
3932 sum.hi = 0;
3933 sum.lo = 0;
3934
3935 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
3936 tx_gtbyt.lo, total_bytes_transmitted_lo);
3937
3938 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
3939 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
3940 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
3941
3942 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
3943 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
3944 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
3945
3946 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
3947 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
3948 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
3949 estats->total_unicast_packets_transmitted_lo, sum.lo);
3950
3951 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
3952 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
3953 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
3954 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
3955 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
3956 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
3957 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
3958 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
3959 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
3960 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
3961 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
3962
3963 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
3964 UPDATE_STAT(rx_grund.lo, runt_packets_received);
3965 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
3966 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
3967 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
3968 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
3969 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
3970 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
3971
3972 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
3973 rx_grerb.lo, stat_IfHCInBadOctets_lo);
3974 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
3975 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
3976 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
3977 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
3978 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
3979}
3980
3981static void bnx2x_update_emac_stats(struct bnx2x *bp)
3982{
3983 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
3984 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3985
3986 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
3987 total_bytes_transmitted_lo);
3988 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
3989 total_unicast_packets_transmitted_hi,
3990 total_unicast_packets_transmitted_lo);
3991 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
3992 total_multicast_packets_transmitted_hi,
3993 total_multicast_packets_transmitted_lo);
3994 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
3995 total_broadcast_packets_transmitted_hi,
3996 total_broadcast_packets_transmitted_lo);
3997
3998 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
3999 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4000 estats->single_collision_transmit_frames +=
4001 new->tx_dot3statssinglecollisionframes;
4002 estats->multiple_collision_transmit_frames +=
4003 new->tx_dot3statsmultiplecollisionframes;
4004 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4005 estats->excessive_collision_frames +=
4006 new->tx_dot3statsexcessivecollisions;
4007 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4008 estats->frames_transmitted_65_127_bytes +=
4009 new->tx_etherstatspkts65octetsto127octets;
4010 estats->frames_transmitted_128_255_bytes +=
4011 new->tx_etherstatspkts128octetsto255octets;
4012 estats->frames_transmitted_256_511_bytes +=
4013 new->tx_etherstatspkts256octetsto511octets;
4014 estats->frames_transmitted_512_1023_bytes +=
4015 new->tx_etherstatspkts512octetsto1023octets;
4016 estats->frames_transmitted_1024_1522_bytes +=
4017 new->tx_etherstatspkts1024octetsto1522octet;
4018 estats->frames_transmitted_1523_9022_bytes +=
4019 new->tx_etherstatspktsover1522octets;
4020
4021 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4022 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4023 estats->false_carrier_detections += new->rx_falsecarriererrors;
4024 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4025 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4026 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4027 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4028 estats->control_frames_received += new->rx_maccontrolframesreceived;
4029 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4030 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4031
4032 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4033 stat_IfHCInBadOctets_lo);
4034 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4035 stat_IfHCOutBadOctets_lo);
4036 estats->stat_Dot3statsInternalMacTransmitErrors +=
4037 new->tx_dot3statsinternalmactransmiterrors;
4038 estats->stat_Dot3StatsCarrierSenseErrors +=
4039 new->rx_dot3statscarriersenseerrors;
4040 estats->stat_Dot3StatsDeferredTransmissions +=
4041 new->tx_dot3statsdeferredtransmissions;
4042 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4043 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4044}
4045
4046static int bnx2x_update_storm_stats(struct bnx2x *bp)
4047{
4048 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4049 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4050 struct tstorm_per_client_stats *tclient =
4051 &tstats->client_statistics[0];
4052 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4053 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4054 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4055 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4056 u32 diff;
4057
4058 /* are DMAE stats valid? */
4059 if (nstats->done != 0xffffffff) {
4060 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4061 return -1;
4062 }
4063
4064 /* are storm stats valid? */
4065 if (tstats->done.hi != 0xffffffff) {
4066 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4067 return -2;
4068 }
4069 if (xstats->done.hi != 0xffffffff) {
4070 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4071 return -3;
4072 }
4073
4074 estats->total_bytes_received_hi =
4075 estats->valid_bytes_received_hi =
4076 le32_to_cpu(tclient->total_rcv_bytes.hi);
4077 estats->total_bytes_received_lo =
4078 estats->valid_bytes_received_lo =
4079 le32_to_cpu(tclient->total_rcv_bytes.lo);
4080 ADD_64(estats->total_bytes_received_hi,
4081 le32_to_cpu(tclient->rcv_error_bytes.hi),
4082 estats->total_bytes_received_lo,
4083 le32_to_cpu(tclient->rcv_error_bytes.lo));
4084
4085 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4086 total_unicast_packets_received_hi,
4087 total_unicast_packets_received_lo);
4088 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4089 total_multicast_packets_received_hi,
4090 total_multicast_packets_received_lo);
4091 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4092 total_broadcast_packets_received_hi,
4093 total_broadcast_packets_received_lo);
4094
4095 estats->frames_received_64_bytes = MAC_STX_NA;
4096 estats->frames_received_65_127_bytes = MAC_STX_NA;
4097 estats->frames_received_128_255_bytes = MAC_STX_NA;
4098 estats->frames_received_256_511_bytes = MAC_STX_NA;
4099 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4100 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4101 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4102
4103 estats->x_total_sent_bytes_hi =
4104 le32_to_cpu(xstats->total_sent_bytes.hi);
4105 estats->x_total_sent_bytes_lo =
4106 le32_to_cpu(xstats->total_sent_bytes.lo);
4107 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4108
4109 estats->t_rcv_unicast_bytes_hi =
4110 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4111 estats->t_rcv_unicast_bytes_lo =
4112 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4113 estats->t_rcv_broadcast_bytes_hi =
4114 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4115 estats->t_rcv_broadcast_bytes_lo =
4116 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4117 estats->t_rcv_multicast_bytes_hi =
4118 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4119 estats->t_rcv_multicast_bytes_lo =
4120 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4121 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4122
4123 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4124 estats->packets_too_big_discard =
4125 le32_to_cpu(tclient->packets_too_big_discard);
4126 estats->jabber_packets_received = estats->packets_too_big_discard +
4127 estats->stat_Dot3statsFramesTooLong;
4128 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4129 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4130 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4131 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4132 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4133 estats->brb_truncate_discard =
4134 le32_to_cpu(tstats->brb_truncate_discard);
4135
4136 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4137 bp->old_brb_discard = nstats->brb_discard;
4138
4139 estats->brb_packet = nstats->brb_packet;
4140 estats->brb_truncate = nstats->brb_truncate;
4141 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4142 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4143 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4144 estats->mng_discard = nstats->mng_discard;
4145 estats->mng_octet_inp = nstats->mng_octet_inp;
4146 estats->mng_octet_out = nstats->mng_octet_out;
4147 estats->mng_packet_inp = nstats->mng_packet_inp;
4148 estats->mng_packet_out = nstats->mng_packet_out;
4149 estats->pbf_octets = nstats->pbf_octets;
4150 estats->pbf_packet = nstats->pbf_packet;
4151 estats->safc_inp = nstats->safc_inp;
4152
4153 xstats->done.hi = 0;
4154 tstats->done.hi = 0;
4155 nstats->done = 0;
4156
4157 return 0;
4158}
4159
4160static void bnx2x_update_net_stats(struct bnx2x *bp)
4161{
4162 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4163 struct net_device_stats *nstats = &bp->dev->stats;
4164
4165 nstats->rx_packets =
4166 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4167 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4168 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4169
4170 nstats->tx_packets =
4171 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4172 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4173 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4174
4175 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4176
4177 nstats->tx_bytes =
4178 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4179
4180 nstats->rx_dropped = estats->checksum_discard +
4181 estats->mac_discard;
4182 nstats->tx_dropped = 0;
4183
4184 	nstats->multicast =
4185 		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4186
4187 nstats->collisions =
4188 estats->single_collision_transmit_frames +
4189 estats->multiple_collision_transmit_frames +
4190 estats->late_collision_frames +
4191 estats->excessive_collision_frames;
4192
4193 nstats->rx_length_errors = estats->runt_packets_received +
4194 estats->jabber_packets_received;
4195 nstats->rx_over_errors = estats->no_buff_discard;
4196 nstats->rx_crc_errors = estats->crc_receive_errors;
4197 nstats->rx_frame_errors = estats->alignment_errors;
4198 nstats->rx_fifo_errors = estats->brb_discard +
4199 estats->brb_truncate_discard;
4200 nstats->rx_missed_errors = estats->xxoverflow_discard;
4201
4202 nstats->rx_errors = nstats->rx_length_errors +
4203 nstats->rx_over_errors +
4204 nstats->rx_crc_errors +
4205 nstats->rx_frame_errors +
4206 nstats->rx_fifo_errors;
4207
4208 nstats->tx_aborted_errors = estats->late_collision_frames +
4209 estats->excessive_collision_frames;
4210 nstats->tx_carrier_errors = estats->false_carrier_detections;
4211 nstats->tx_fifo_errors = 0;
4212 nstats->tx_heartbeat_errors = 0;
4213 nstats->tx_window_errors = 0;
4214
4215 nstats->tx_errors = nstats->tx_aborted_errors +
4216 nstats->tx_carrier_errors;
4217
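	/* editorial note: both markers are advanced to the same value only
	 * once the snapshot above is complete, so a reader that sees
	 * mac_stx_start == mac_stx_end may treat the stats as consistent
	 * (seqcount-style scheme, inferred from the field names) */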
4218 estats->mac_stx_start = ++estats->mac_stx_end;
4219}
4220
4221static void bnx2x_update_stats(struct bnx2x *bp)
4222{
4223 int i;
4224
4225 if (!bnx2x_update_storm_stats(bp)) {
4226
4227 if (bp->phy_flags & PHY_BMAC_FLAG) {
4228 bnx2x_update_bmac_stats(bp);
4229
4230 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4231 bnx2x_update_emac_stats(bp);
4232
4233 } else { /* unreached */
4234 BNX2X_ERR("no MAC active\n");
4235 return;
4236 }
4237
4238 bnx2x_update_net_stats(bp);
4239 }
4240
4241 if (bp->msglevel & NETIF_MSG_TIMER) {
4242 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4243 struct net_device_stats *nstats = &bp->dev->stats;
4244
4245 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4246 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4247 " tx pkt (%lx)\n",
4248 bnx2x_tx_avail(bp->fp),
4249 *bp->fp->tx_cons_sb, nstats->tx_packets);
4250 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4251 " rx pkt (%lx)\n",
4252 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
4253 *bp->fp->rx_cons_sb, nstats->rx_packets);
4254 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
4255 	       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
4256 estats->driver_xoff, estats->brb_discard);
4257 printk(KERN_DEBUG "tstats: checksum_discard %u "
4258 "packets_too_big_discard %u no_buff_discard %u "
4259 "mac_discard %u mac_filter_discard %u "
4260 "xxovrflow_discard %u brb_truncate_discard %u "
4261 "ttl0_discard %u\n",
4262 estats->checksum_discard,
4263 estats->packets_too_big_discard,
4264 estats->no_buff_discard, estats->mac_discard,
4265 estats->mac_filter_discard, estats->xxoverflow_discard,
4266 estats->brb_truncate_discard, estats->ttl0_discard);
4267
4268 for_each_queue(bp, i) {
4269 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4270 bnx2x_fp(bp, i, tx_pkt),
4271 bnx2x_fp(bp, i, rx_pkt),
4272 bnx2x_fp(bp, i, rx_calls));
4273 }
4274 }
4275
4276 if (bp->state != BNX2X_STATE_OPEN) {
4277 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
4278 return;
4279 }
4280
4281#ifdef BNX2X_STOP_ON_ERROR
4282 if (unlikely(bp->panic))
4283 return;
4284#endif
4285
4286 /* loader */
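	/* editorial note: the 'loader' is itself a DMAE command - it copies
	 * the first prepared command from host memory into DMAE command
	 * slot (loader_idx + 1) and, via its completion value, writes 1 to
	 * that slot's GO register, so the statistics sequence prepared
	 * above runs without further per-command CPU writes */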
4287 if (bp->executer_idx) {
4288 struct dmae_command *dmae = &bp->dmae;
4289 int port = bp->port;
4290 int loader_idx = port * 8;
4291
4292 memset(dmae, 0, sizeof(struct dmae_command));
4293
4294 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4295 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4296 DMAE_CMD_DST_RESET |
4297#ifdef __BIG_ENDIAN
4298 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4299#else
4300 DMAE_CMD_ENDIANITY_DW_SWAP |
4301#endif
4302 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4303 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
4304 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
4305 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
4306 sizeof(struct dmae_command) *
4307 (loader_idx + 1)) >> 2;
4308 dmae->dst_addr_hi = 0;
4309 dmae->len = sizeof(struct dmae_command) >> 2;
4310 dmae->len--; /* !!! for A0/1 only */
4311 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
4312 dmae->comp_addr_hi = 0;
4313 dmae->comp_val = 1;
4314
4315 bnx2x_post_dmae(bp, dmae, loader_idx);
4316 }
4317
4318 if (bp->stats_state != STATS_STATE_ENABLE) {
4319 bp->stats_state = STATS_STATE_DISABLE;
4320 return;
4321 }
4322
4323 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
4324 		/* stats ramrod has its own slot on the spe */
4325 bp->spq_left++;
4326 bp->stat_pending = 1;
4327 }
4328}
4329
4330static void bnx2x_timer(unsigned long data)
4331{
4332 struct bnx2x *bp = (struct bnx2x *) data;
4333
4334 if (!netif_running(bp->dev))
4335 return;
4336
4337 if (atomic_read(&bp->intr_sem) != 0)
4338 goto bnx2x_restart_timer;
4339
4340 if (poll) {
4341 struct bnx2x_fastpath *fp = &bp->fp[0];
4342 int rc;
4343
4344 bnx2x_tx_int(fp, 1000);
4345 rc = bnx2x_rx_int(fp, 1000);
4346 }
4347
4348 if (!nomcp && (bp->bc_ver >= 0x040003)) {
4349 int port = bp->port;
4350 u32 drv_pulse;
4351 u32 mcp_pulse;
4352
4353 ++bp->fw_drv_pulse_wr_seq;
4354 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4355 /* TBD - add SYSTEM_TIME */
4356 drv_pulse = bp->fw_drv_pulse_wr_seq;
4357 SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse);
4358
4359 mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) &
4360 MCP_PULSE_SEQ_MASK);
4361 /* The delta between driver pulse and mcp response
4362 * should be 1 (before mcp response) or 0 (after mcp response)
4363 */
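		/* editorial example: with a 15-bit sequence mask
		 * (MCP_PULSE_SEQ_MASK assumed to be 0x7fff), a drv_pulse of
		 * 0x0000 is still in sync with an mcp_pulse of 0x7fff right
		 * after a wrap, since (0x7fff + 1) & 0x7fff == 0x0000 */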
4364 if ((drv_pulse != mcp_pulse) &&
4365 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4366 /* someone lost a heartbeat... */
4367 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4368 drv_pulse, mcp_pulse);
4369 }
4370 }
4371
4372 if (bp->stats_state == STATS_STATE_DISABLE)
4373 goto bnx2x_restart_timer;
4374
4375 bnx2x_update_stats(bp);
4376
4377bnx2x_restart_timer:
4378 mod_timer(&bp->timer, jiffies + bp->current_interval);
4379}
4380
4381/* end of Statistics */
4382
4383/* nic init */
4384
4385/*
4386 * nic init service functions
4387 */
4388
4389static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4390 dma_addr_t mapping, int id)
4391{
4392 int port = bp->port;
4393 u64 section;
4394 int index;
4395
4396 /* USTORM */
4397 section = ((u64)mapping) + offsetof(struct host_status_block,
4398 u_status_block);
4399 sb->u_status_block.status_block_id = id;
4400
4401 REG_WR(bp, BAR_USTRORM_INTMEM +
4402 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
4403 REG_WR(bp, BAR_USTRORM_INTMEM +
4404 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
4405 U64_HI(section));
4406
4407 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4408 REG_WR16(bp, BAR_USTRORM_INTMEM +
4409 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
4410
4411 /* CSTORM */
4412 section = ((u64)mapping) + offsetof(struct host_status_block,
4413 c_status_block);
4414 sb->c_status_block.status_block_id = id;
4415
4416 REG_WR(bp, BAR_CSTRORM_INTMEM +
4417 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
4418 REG_WR(bp, BAR_CSTRORM_INTMEM +
4419 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
4420 U64_HI(section));
4421
4422 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4423 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4424 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
4425
4426 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4427}
4428
4429static void bnx2x_init_def_sb(struct bnx2x *bp,
4430 struct host_def_status_block *def_sb,
4431 dma_addr_t mapping, int id)
4432{
4433 int port = bp->port;
4434 int index, val, reg_offset;
4435 u64 section;
4436
4437 /* ATTN */
4438 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4439 atten_status_block);
4440 def_sb->atten_status_block.status_block_id = id;
4441
4442 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4443 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4444
4445 for (index = 0; index < 3; index++) {
4446 bp->attn_group[index].sig[0] = REG_RD(bp,
4447 reg_offset + 0x10*index);
4448 bp->attn_group[index].sig[1] = REG_RD(bp,
4449 reg_offset + 0x4 + 0x10*index);
4450 bp->attn_group[index].sig[2] = REG_RD(bp,
4451 reg_offset + 0x8 + 0x10*index);
4452 bp->attn_group[index].sig[3] = REG_RD(bp,
4453 reg_offset + 0xc + 0x10*index);
4454 }
4455
4456 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4457 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4458
4459 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4460 HC_REG_ATTN_MSG0_ADDR_L);
4461
4462 REG_WR(bp, reg_offset, U64_LO(section));
4463 REG_WR(bp, reg_offset + 4, U64_HI(section));
4464
4465 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4466
4467 val = REG_RD(bp, reg_offset);
4468 val |= id;
4469 REG_WR(bp, reg_offset, val);
4470
4471 /* USTORM */
4472 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4473 u_def_status_block);
4474 def_sb->u_def_status_block.status_block_id = id;
4475
4476 REG_WR(bp, BAR_USTRORM_INTMEM +
4477 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4478 REG_WR(bp, BAR_USTRORM_INTMEM +
4479 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4480 U64_HI(section));
4481 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
4482 BNX2X_BTR);
4483
4484 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4485 REG_WR16(bp, BAR_USTRORM_INTMEM +
4486 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4487
4488 /* CSTORM */
4489 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4490 c_def_status_block);
4491 def_sb->c_def_status_block.status_block_id = id;
4492
4493 REG_WR(bp, BAR_CSTRORM_INTMEM +
4494 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4495 REG_WR(bp, BAR_CSTRORM_INTMEM +
4496 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4497 U64_HI(section));
4498 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
4499 BNX2X_BTR);
4500
4501 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4502 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4503 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4504
4505 /* TSTORM */
4506 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4507 t_def_status_block);
4508 def_sb->t_def_status_block.status_block_id = id;
4509
4510 REG_WR(bp, BAR_TSTRORM_INTMEM +
4511 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4512 REG_WR(bp, BAR_TSTRORM_INTMEM +
4513 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4514 U64_HI(section));
4515 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
4516 BNX2X_BTR);
4517
4518 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4519 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4520 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4521
4522 /* XSTORM */
4523 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4524 x_def_status_block);
4525 def_sb->x_def_status_block.status_block_id = id;
4526
4527 REG_WR(bp, BAR_XSTRORM_INTMEM +
4528 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4529 REG_WR(bp, BAR_XSTRORM_INTMEM +
4530 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4531 U64_HI(section));
4532 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
4533 BNX2X_BTR);
4534
4535 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4536 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4537 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4538
4539 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4540}
4541
4542static void bnx2x_update_coalesce(struct bnx2x *bp)
4543{
4544 int port = bp->port;
4545 int i;
4546
4547 for_each_queue(bp, i) {
4548
4549 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4550 REG_WR8(bp, BAR_USTRORM_INTMEM +
4551 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
4552 HC_INDEX_U_ETH_RX_CQ_CONS),
4553 bp->rx_ticks_int/12);
4554 REG_WR16(bp, BAR_USTRORM_INTMEM +
4555 USTORM_SB_HC_DISABLE_OFFSET(port, i,
4556 HC_INDEX_U_ETH_RX_CQ_CONS),
4557 bp->rx_ticks_int ? 0 : 1);
4558
4559 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4560 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4561 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
4562 HC_INDEX_C_ETH_TX_CQ_CONS),
4563 bp->tx_ticks_int/12);
4564 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4565 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
4566 HC_INDEX_C_ETH_TX_CQ_CONS),
4567 bp->tx_ticks_int ? 0 : 1);
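		/* editorial note: a tick value of 0 disables coalescing for
		 * that index via the HC_DISABLE write above; the /12 scaling
		 * assumes the host-coalescing timeout register counts in
		 * 12-usec hardware ticks (assumption, not stated here) */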
4568 }
4569}
4570
4571static void bnx2x_init_rx_rings(struct bnx2x *bp)
4572{
4573 u16 ring_prod;
4574 int i, j;
4575 int port = bp->port;
4576
4577 bp->rx_buf_use_size = bp->dev->mtu;
4578
4579 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4580 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4581
4582 for_each_queue(bp, j) {
4583 struct bnx2x_fastpath *fp = &bp->fp[j];
4584
4585 fp->rx_bd_cons = 0;
4586 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4587
4588 for (i = 1; i <= NUM_RX_RINGS; i++) {
4589 struct eth_rx_bd *rx_bd;
4590
4591 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4592 rx_bd->addr_hi =
4593 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4594 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4595 rx_bd->addr_lo =
4596 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4597 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4598
4599 }
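		/* editorial note: the last descriptor of every page (two BD
		 * slots, hence the '- 2') is turned into a pointer to the
		 * following page, and 'i % NUM_RX_RINGS' wraps the final
		 * page back to the first, forming one circular ring out of
		 * NUM_RX_RINGS pages */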
4600
4601 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4602 struct eth_rx_cqe_next_page *nextpg;
4603
4604 nextpg = (struct eth_rx_cqe_next_page *)
4605 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4606 nextpg->addr_hi =
4607 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4608 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4609 nextpg->addr_lo =
4610 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4611 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4612 }
4613
4614 /* rx completion queue */
4615 fp->rx_comp_cons = ring_prod = 0;
4616
4617 for (i = 0; i < bp->rx_ring_size; i++) {
4618 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4619 BNX2X_ERR("was only able to allocate "
4620 "%d rx skbs\n", i);
4621 break;
4622 }
4623 ring_prod = NEXT_RX_IDX(ring_prod);
4624 BUG_TRAP(ring_prod > i);
4625 }
4626
4627 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
4628 fp->rx_pkt = fp->rx_calls = 0;
4629
4630 		/* Warning! this will generate an interrupt (to the TSTORM) */
4631 /* must only be done when chip is initialized */
4632 REG_WR(bp, BAR_TSTRORM_INTMEM +
4633 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
4634 if (j != 0)
4635 continue;
4636
4637 REG_WR(bp, BAR_USTRORM_INTMEM +
4638 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
4639 U64_LO(fp->rx_comp_mapping));
4640 REG_WR(bp, BAR_USTRORM_INTMEM +
4641 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
4642 U64_HI(fp->rx_comp_mapping));
4643 }
4644}
4645
4646static void bnx2x_init_tx_ring(struct bnx2x *bp)
4647{
4648 int i, j;
4649
4650 for_each_queue(bp, j) {
4651 struct bnx2x_fastpath *fp = &bp->fp[j];
4652
4653 for (i = 1; i <= NUM_TX_RINGS; i++) {
4654 struct eth_tx_bd *tx_bd =
4655 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4656
4657 tx_bd->addr_hi =
4658 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4659 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4660 tx_bd->addr_lo =
4661 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4662 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4663 }
4664
4665 fp->tx_pkt_prod = 0;
4666 fp->tx_pkt_cons = 0;
4667 fp->tx_bd_prod = 0;
4668 fp->tx_bd_cons = 0;
4669 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4670 fp->tx_pkt = 0;
4671 }
4672}
4673
4674static void bnx2x_init_sp_ring(struct bnx2x *bp)
4675{
4676 int port = bp->port;
4677
4678 spin_lock_init(&bp->spq_lock);
4679
4680 bp->spq_left = MAX_SPQ_PENDING;
4681 bp->spq_prod_idx = 0;
4682 bp->dsb_sp_prod_idx = 0;
4683 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4684 bp->spq_prod_bd = bp->spq;
4685 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4686
4687 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
4688 U64_LO(bp->spq_mapping));
4689 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
4690 U64_HI(bp->spq_mapping));
4691
4692 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
4693 bp->spq_prod_idx);
4694}
4695
4696static void bnx2x_init_context(struct bnx2x *bp)
4697{
4698 int i;
4699
4700 for_each_queue(bp, i) {
4701 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4702 struct bnx2x_fastpath *fp = &bp->fp[i];
4703
4704 context->xstorm_st_context.tx_bd_page_base_hi =
4705 U64_HI(fp->tx_desc_mapping);
4706 context->xstorm_st_context.tx_bd_page_base_lo =
4707 U64_LO(fp->tx_desc_mapping);
4708 context->xstorm_st_context.db_data_addr_hi =
4709 U64_HI(fp->tx_prods_mapping);
4710 context->xstorm_st_context.db_data_addr_lo =
4711 U64_LO(fp->tx_prods_mapping);
4712
4713 context->ustorm_st_context.rx_bd_page_base_hi =
4714 U64_HI(fp->rx_desc_mapping);
4715 context->ustorm_st_context.rx_bd_page_base_lo =
4716 U64_LO(fp->rx_desc_mapping);
4717 context->ustorm_st_context.status_block_id = i;
4718 context->ustorm_st_context.sb_index_number =
4719 HC_INDEX_U_ETH_RX_CQ_CONS;
4720 context->ustorm_st_context.rcq_base_address_hi =
4721 U64_HI(fp->rx_comp_mapping);
4722 context->ustorm_st_context.rcq_base_address_lo =
4723 U64_LO(fp->rx_comp_mapping);
4724 context->ustorm_st_context.flags =
4725 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
4726 context->ustorm_st_context.mc_alignment_size = 64;
4727 context->ustorm_st_context.num_rss = bp->num_queues;
4728
4729 context->cstorm_st_context.sb_index_number =
4730 HC_INDEX_C_ETH_TX_CQ_CONS;
4731 context->cstorm_st_context.status_block_id = i;
4732
4733 context->xstorm_ag_context.cdu_reserved =
4734 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4735 CDU_REGION_NUMBER_XCM_AG,
4736 ETH_CONNECTION_TYPE);
4737 context->ustorm_ag_context.cdu_usage =
4738 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4739 CDU_REGION_NUMBER_UCM_AG,
4740 ETH_CONNECTION_TYPE);
4741 }
4742}
4743
4744static void bnx2x_init_ind_table(struct bnx2x *bp)
4745{
4746 int port = bp->port;
4747 int i;
4748
4749 if (!is_multi(bp))
4750 return;
4751
4752 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4753 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4754 i % bp->num_queues);
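	/* editorial example: with num_queues == 4 the table is filled
	 * round-robin as 0,1,2,3,0,1,2,3,... so RSS hash buckets spread
	 * evenly over the active queues */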
4755
4756 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4757}
4758
4759static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4760{
4761 int mode = bp->rx_mode;
4762 int port = bp->port;
4763 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4764 int i;
4765
4766 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4767
4768 switch (mode) {
4769 case BNX2X_RX_MODE_NONE: /* no Rx */
4770 tstorm_mac_filter.ucast_drop_all = 1;
4771 tstorm_mac_filter.mcast_drop_all = 1;
4772 tstorm_mac_filter.bcast_drop_all = 1;
4773 break;
4774 case BNX2X_RX_MODE_NORMAL:
4775 tstorm_mac_filter.bcast_accept_all = 1;
4776 break;
4777 case BNX2X_RX_MODE_ALLMULTI:
4778 tstorm_mac_filter.mcast_accept_all = 1;
4779 tstorm_mac_filter.bcast_accept_all = 1;
4780 break;
4781 case BNX2X_RX_MODE_PROMISC:
4782 tstorm_mac_filter.ucast_accept_all = 1;
4783 tstorm_mac_filter.mcast_accept_all = 1;
4784 tstorm_mac_filter.bcast_accept_all = 1;
4785 break;
4786 default:
4787 BNX2X_ERR("bad rx mode (%d)\n", mode);
4788 }
4789
4790 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4791 REG_WR(bp, BAR_TSTRORM_INTMEM +
4792 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
4793 ((u32 *)&tstorm_mac_filter)[i]);
4794
4795/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4796 ((u32 *)&tstorm_mac_filter)[i]); */
4797 }
4798}
4799
4800static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
4801{
4802#ifdef BCM_VLAN
4803 int mode = bp->rx_mode;
4804#endif
4805 int port = bp->port;
4806 struct tstorm_eth_client_config tstorm_client = {0};
4807
4808 tstorm_client.mtu = bp->dev->mtu;
4809 tstorm_client.statistics_counter_id = 0;
4810 tstorm_client.config_flags =
4811 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4812#ifdef BCM_VLAN
4813 if (mode && bp->vlgrp) {
4814 tstorm_client.config_flags |=
4815 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4816 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4817 }
4818#endif
4819 tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
4820 TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
4821 TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
4822 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
4823
4824 REG_WR(bp, BAR_TSTRORM_INTMEM +
4825 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
4826 ((u32 *)&tstorm_client)[0]);
4827 REG_WR(bp, BAR_TSTRORM_INTMEM +
4828 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
4829 ((u32 *)&tstorm_client)[1]);
4830
4831/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
4832 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
4833}
4834
4835static void bnx2x_init_internal(struct bnx2x *bp)
4836{
4837 int port = bp->port;
4838 struct tstorm_eth_function_common_config tstorm_config = {0};
4839 struct stats_indication_flags stats_flags = {0};
4840 int i;
4841
4842 if (is_multi(bp)) {
4843 tstorm_config.config_flags = MULTI_FLAGS;
4844 tstorm_config.rss_result_mask = MULTI_MASK;
4845 }
4846
4847 REG_WR(bp, BAR_TSTRORM_INTMEM +
4848 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
4849 (*(u32 *)&tstorm_config));
4850
4851/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4852 (*(u32 *)&tstorm_config)); */
4853
4854 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4855 bnx2x_set_storm_rx_mode(bp);
4856
4857 for_each_queue(bp, i)
4858 bnx2x_set_client_config(bp, i);
4859
4860
4861 stats_flags.collect_eth = cpu_to_le32(1);
4862
4863 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4864 ((u32 *)&stats_flags)[0]);
4865 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4866 ((u32 *)&stats_flags)[1]);
4867
4868 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4869 ((u32 *)&stats_flags)[0]);
4870 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4871 ((u32 *)&stats_flags)[1]);
4872
4873 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4874 ((u32 *)&stats_flags)[0]);
4875 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4876 ((u32 *)&stats_flags)[1]);
4877
4878/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4879 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4880}
4881
4882static void bnx2x_nic_init(struct bnx2x *bp)
4883{
4884 int i;
4885
4886 for_each_queue(bp, i) {
4887 struct bnx2x_fastpath *fp = &bp->fp[i];
4888
4889 fp->state = BNX2X_FP_STATE_CLOSED;
4890 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
4891 bp, fp->status_blk, i);
4892 fp->index = i;
4893 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
4894 }
4895
4896 bnx2x_init_def_sb(bp, bp->def_status_blk,
4897 bp->def_status_blk_mapping, 0x10);
4898 bnx2x_update_coalesce(bp);
4899 bnx2x_init_rx_rings(bp);
4900 bnx2x_init_tx_ring(bp);
4901 bnx2x_init_sp_ring(bp);
4902 bnx2x_init_context(bp);
4903 bnx2x_init_internal(bp);
4904 bnx2x_init_stats(bp);
4905 bnx2x_init_ind_table(bp);
4906 bnx2x_enable_int(bp);
4907
4908}
4909
4910/* end of nic init */
4911
4912/*
4913 * gzip service functions
4914 */
4915
4916static int bnx2x_gunzip_init(struct bnx2x *bp)
4917{
4918 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4919 &bp->gunzip_mapping);
4920 if (bp->gunzip_buf == NULL)
4921 goto gunzip_nomem1;
4922
4923 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4924 if (bp->strm == NULL)
4925 goto gunzip_nomem2;
4926
4927 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4928 GFP_KERNEL);
4929 if (bp->strm->workspace == NULL)
4930 goto gunzip_nomem3;
4931
4932 return 0;
4933
4934gunzip_nomem3:
4935 kfree(bp->strm);
4936 bp->strm = NULL;
4937
4938gunzip_nomem2:
4939 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4940 bp->gunzip_mapping);
4941 bp->gunzip_buf = NULL;
4942
4943gunzip_nomem1:
4944 	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4945 	       " decompression\n", bp->dev->name);
4946 return -ENOMEM;
4947}
4948
4949static void bnx2x_gunzip_end(struct bnx2x *bp)
4950{
4951 kfree(bp->strm->workspace);
4952
4953 kfree(bp->strm);
4954 bp->strm = NULL;
4955
4956 if (bp->gunzip_buf) {
4957 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4958 bp->gunzip_mapping);
4959 bp->gunzip_buf = NULL;
4960 }
4961}
4962
4963static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4964{
4965 int n, rc;
4966
4967 /* check gzip header */
4968 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4969 return -EINVAL;
4970
4971 n = 10;
4972
4973#define FNAME 0x8
4974
4975 if (zbuf[3] & FNAME)
4976 		while ((n < len) && (zbuf[n++] != 0));
4977
4978 bp->strm->next_in = zbuf + n;
4979 bp->strm->avail_in = len - n;
4980 bp->strm->next_out = bp->gunzip_buf;
4981 bp->strm->avail_out = FW_BUF_SIZE;
4982
4983 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4984 if (rc != Z_OK)
4985 return rc;
4986
4987 rc = zlib_inflate(bp->strm, Z_FINISH);
4988 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4989 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4990 bp->dev->name, bp->strm->msg);
4991
4992 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4993 if (bp->gunzip_outlen & 0x3)
4994 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4995 " gunzip_outlen (%d) not aligned\n",
4996 bp->dev->name, bp->gunzip_outlen);
4997 bp->gunzip_outlen >>= 2;
4998
4999 zlib_inflateEnd(bp->strm);
5000
5001 if (rc == Z_STREAM_END)
5002 return 0;
5003
5004 return rc;
5005}
5006
5007/* nic load/unload */
5008
5009/*
5010 * general service functions
5011 */
5012
5013/* send a NIG loopback debug packet */
5014static void bnx2x_lb_pckt(struct bnx2x *bp)
5015{
5016#ifdef USE_DMAE
5017 u32 wb_write[3];
5018#endif
5019
5020 /* Ethernet source and destination addresses */
5021#ifdef USE_DMAE
5022 wb_write[0] = 0x55555555;
5023 wb_write[1] = 0x55555555;
5024 wb_write[2] = 0x20; /* SOP */
5025 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5026#else
5027 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5028 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5029 /* SOP */
5030 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5031#endif
5032
5033 /* NON-IP protocol */
5034#ifdef USE_DMAE
5035 wb_write[0] = 0x09000000;
5036 wb_write[1] = 0x55555555;
5037 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5038 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5039#else
5040 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5041 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5042 /* EOP, eop_bvalid = 0 */
5043 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5044#endif
5045}
5046
5047 /* some of the internal memories
5048  * are not directly readable from the driver;
5049  * to test them we send debug packets
5050  */
5051static int bnx2x_int_mem_test(struct bnx2x *bp)
5052{
5053 int factor;
5054 int count, i;
5055 u32 val = 0;
5056
5057 switch (CHIP_REV(bp)) {
5058 case CHIP_REV_EMUL:
5059 factor = 200;
5060 break;
5061 case CHIP_REV_FPGA:
5062 factor = 120;
5063 break;
5064 default:
5065 factor = 1;
5066 break;
5067 }
5068
5069 DP(NETIF_MSG_HW, "start part1\n");
5070
5071 /* Disable inputs of parser neighbor blocks */
5072 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5073 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5074 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5075 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5076
5077 /* Write 0 to parser credits for CFC search request */
5078 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5079
5080 /* send Ethernet packet */
5081 bnx2x_lb_pckt(bp);
5082
5083 	/* TODO: do I reset the NIG statistics? */
5084 /* Wait until NIG register shows 1 packet of size 0x10 */
5085 count = 1000 * factor;
5086 while (count) {
5087#ifdef BNX2X_DMAE_RD
5088 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5089 val = *bnx2x_sp(bp, wb_data[0]);
5090#else
5091 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5092 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5093#endif
5094 if (val == 0x10)
5095 break;
5096
5097 msleep(10);
5098 count--;
5099 }
5100 if (val != 0x10) {
5101 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5102 return -1;
5103 }
5104
5105 /* Wait until PRS register shows 1 packet */
5106 count = 1000 * factor;
5107 while (count) {
5108 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5109
5110 if (val == 1)
5111 break;
5112
5113 msleep(10);
5114 count--;
5115 }
5116 if (val != 0x1) {
5117 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5118 return -2;
5119 }
5120
5121 /* Reset and init BRB, PRS */
5122 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5123 msleep(50);
5124 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5125 msleep(50);
5126 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5127 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5128
5129 DP(NETIF_MSG_HW, "part2\n");
5130
5131 /* Disable inputs of parser neighbor blocks */
5132 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5133 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5134 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5135 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5136
5137 /* Write 0 to parser credits for CFC search request */
5138 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5139
5140 /* send 10 Ethernet packets */
5141 for (i = 0; i < 10; i++)
5142 bnx2x_lb_pckt(bp);
5143
5144 /* Wait until NIG register shows 10 + 1
5145 packets of size 11*0x10 = 0xb0 */
5146 count = 1000 * factor;
5147 while (count) {
5148#ifdef BNX2X_DMAE_RD
5149 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5150 val = *bnx2x_sp(bp, wb_data[0]);
5151#else
5152 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5153 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5154#endif
5155 if (val == 0xb0)
5156 break;
5157
5158 msleep(10);
5159 count--;
5160 }
5161 if (val != 0xb0) {
5162 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5163 return -3;
5164 }
5165
5166 /* Wait until PRS register shows 2 packets */
5167 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5168 if (val != 2)
5169 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5170
5171 /* Write 1 to parser credits for CFC search request */
5172 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5173
5174 /* Wait until PRS register shows 3 packets */
5175 msleep(10 * factor);
5176 /* Wait until NIG register shows 1 packet of size 0x10 */
5177 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5178 if (val != 3)
5179 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5180
5181 /* clear NIG EOP FIFO */
5182 for (i = 0; i < 11; i++)
5183 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5184 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5185 if (val != 1) {
5186 BNX2X_ERR("clear of NIG failed\n");
5187 return -4;
5188 }
5189
5190 /* Reset and init BRB, PRS, NIG */
5191 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5192 msleep(50);
5193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5194 msleep(50);
5195 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5196 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5197#ifndef BCM_ISCSI
5198 /* set NIC mode */
5199 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5200#endif
5201
5202 /* Enable inputs of parser neighbor blocks */
5203 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5204 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5205 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5206 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5207
5208 DP(NETIF_MSG_HW, "done\n");
5209
5210 return 0; /* OK */
5211}
5212
5213static void enable_blocks_attention(struct bnx2x *bp)
5214{
5215 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5216 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5217 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5218 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5219 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5220 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5221 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5222 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5223 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5224/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5225/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5226 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5227 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5228 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5229/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5230/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5231 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5232 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5233 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5234 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5235/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5236/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5237 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
5238 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5239 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5240 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5241/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5242/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5243 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5244 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5245/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5246 	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
5247}
5248
5249static int bnx2x_function_init(struct bnx2x *bp, int mode)
5250{
5251 int func = bp->port;
5252 int port = func ? PORT1 : PORT0;
5253 u32 val, i;
5254#ifdef USE_DMAE
5255 u32 wb_write[2];
5256#endif
5257
5258 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
5259 if ((func != 0) && (func != 1)) {
5260 BNX2X_ERR("BAD function number (%d)\n", func);
5261 return -ENODEV;
5262 }
5263
5264 bnx2x_gunzip_init(bp);
5265
5266 if (mode & 0x1) { /* init common */
5267 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
5268 func, mode);
5269 REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff);
5270 REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc);
5271 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5272
5273 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5274 msleep(30);
5275 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5276
5277 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5278 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5279
5280 bnx2x_init_pxp(bp);
5281
5282 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5283 /* enable HW interrupt from PXP on USDM
5284 overflow bit 16 on INT_MASK_0 */
5285 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5286 }
5287
5288#ifdef __BIG_ENDIAN
5289 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5290 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5291 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5292 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5293 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5294 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5295
5296/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5297 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5298 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5299 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5300 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5301#endif
5302
5303#ifndef BCM_ISCSI
5304 /* set NIC mode */
5305 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5306#endif
5307
5308 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
5309#ifdef BCM_ISCSI
5310 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5311 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5312 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5313#endif
5314
5315 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5316
5317 		/* let the HW do its magic ... */
5318 msleep(100);
5319 /* finish PXP init
5320 (can be moved up if we want to use the DMAE) */
5321 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5322 if (val != 1) {
5323 BNX2X_ERR("PXP2 CFG failed\n");
5324 return -EBUSY;
5325 }
5326
5327 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5328 if (val != 1) {
5329 BNX2X_ERR("PXP2 RD_INIT failed\n");
5330 return -EBUSY;
5331 }
5332
5333 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5334 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5335
5336 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5337
5338 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5339 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5340 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5341 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5342
5343#ifdef BNX2X_DMAE_RD
5344 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5345 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5346 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5347 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5348#else
5349 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
5350 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
5351 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
5352 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
5353 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
5354 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
5355 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
5356 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
5357 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
5358 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
5359 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
5360 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
5361#endif
5362 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5363 		/* soft reset pulse */
5364 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5365 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5366
5367#ifdef BCM_ISCSI
5368 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5369#endif
5370 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5371 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
5372 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5373 /* enable hw interrupt from doorbell Q */
5374 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5375 }
5376
5377 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5378
5379 if (CHIP_REV_IS_SLOW(bp)) {
5380 /* fix for emulation and FPGA for no pause */
5381 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5382 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5383 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5384 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5385 }
5386
5387 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5388
5389 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5390 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5391 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5392 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5393
5394 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5395 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5396 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5397 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5398
5399 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5400 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5401 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5402 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5403
5404 /* sync semi rtc */
5405 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5406 0x80000000);
5407 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5408 0x80000000);
5409
5410 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5411 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5412 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5413
5414 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5415 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5416 REG_WR(bp, i, 0xc0cac01a);
5417 			/* TODO: replace with something meaningful */
5418 }
5419 /* SRCH COMMON comes here */
5420 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5421
5422 if (sizeof(union cdu_context) != 1024) {
5423 /* we currently assume that a context is 1024 bytes */
5424 printk(KERN_ALERT PFX "please adjust the size of"
5425 " cdu_context(%ld)\n",
5426 (long)sizeof(union cdu_context));
5427 }
5428 val = (4 << 24) + (0 << 12) + 1024;
5429 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5430 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5431
5432 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5433 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5434
5435 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5436 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
5437 MISC_AEU_COMMON_END);
5438 /* RXPCS COMMON comes here */
5439 /* EMAC0 COMMON comes here */
5440 /* EMAC1 COMMON comes here */
5441 /* DBU COMMON comes here */
5442 /* DBG COMMON comes here */
5443 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5444
5445 if (CHIP_REV_IS_SLOW(bp))
5446 msleep(200);
5447
5448 /* finish CFC init */
5449 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
5450 if (val != 1) {
5451 BNX2X_ERR("CFC LL_INIT failed\n");
5452 return -EBUSY;
5453 }
5454
5455 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
5456 if (val != 1) {
5457 BNX2X_ERR("CFC AC_INIT failed\n");
5458 return -EBUSY;
5459 }
5460
5461 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
5462 if (val != 1) {
5463 BNX2X_ERR("CFC CAM_INIT failed\n");
5464 return -EBUSY;
5465 }
5466
5467 REG_WR(bp, CFC_REG_DEBUG0, 0);
5468
5469 /* read NIG statistic
5470 to see if this is our first up since powerup */
5471#ifdef BNX2X_DMAE_RD
5472 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5473 val = *bnx2x_sp(bp, wb_data[0]);
5474#else
5475 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5476 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5477#endif
5478 /* do internal memory self test */
5479 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5480 BNX2X_ERR("internal mem selftest failed\n");
5481 return -EBUSY;
5482 }
5483
5484 /* clear PXP2 attentions */
5485 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
5486
5487 enable_blocks_attention(bp);
5488 /* enable_blocks_parity(bp); */
5489
5490 } /* end of common init */
5491
5492 /* per port init */
5493
5494 	/* the physical address is shifted right 12 bits and a valid bit
5495 	   (1) is added in bit 53;
5496 	   then, since this is a wide register(TM),
5497 	   we split it into two 32-bit writes
5498 	 */
5499#define RQ_ONCHIP_AT_PORT_SIZE 384
5500#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5501#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5502#define PXP_ONE_ILT(x) ((x << 10) | x)
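
/* editorial worked example: for a DMA address of 0x0000000123456000,
 * ONCHIP_ADDR1() yields 0x00123456 (the address shifted right 12 bits)
 * and ONCHIP_ADDR2() yields 0x00100000 (just the valid bit, 1 << 20,
 * since bits 44..63 of this address are zero) */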
5503
5504 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
5505
5506 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
5507
5508 /* Port PXP comes here */
5509 /* Port PXP2 comes here */
5510
5511 /* Offset is
5512 * Port0 0
5513 * Port1 384 */
5514 i = func * RQ_ONCHIP_AT_PORT_SIZE;
5515#ifdef USE_DMAE
5516 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
5517 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
5518 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5519#else
5520 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
5521 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
5522 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
5523 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
5524#endif
5525 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
5526
5527#ifdef BCM_ISCSI
5528 /* Port0 1
5529 * Port1 385 */
5530 i++;
5531 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5532 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5533 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5534 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5535
5536 /* Port0 2
5537 * Port1 386 */
5538 i++;
5539 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5540 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5541 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5542 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5543
5544 /* Port0 3
5545 * Port1 387 */
5546 i++;
5547 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5548 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5549 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5550 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5551#endif
5552
5553 /* Port TCM comes here */
5554 /* Port UCM comes here */
5555 /* Port CCM comes here */
5556 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
5557 func ? XCM_PORT1_END : XCM_PORT0_END);
5558
5559#ifdef USE_DMAE
5560 wb_write[0] = 0;
5561 wb_write[1] = 0;
5562#endif
5563 for (i = 0; i < 32; i++) {
5564 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
5565#ifdef USE_DMAE
5566 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
5567#else
5568 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
5569 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
5570#endif
5571 }
5572 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
5573
5574 /* Port QM comes here */
5575
5576#ifdef BCM_ISCSI
5577 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5578 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5579
5580 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5581 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5582#endif
5583 /* Port DQ comes here */
5584 /* Port BRB1 comes here */
5585 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
5586 func ? PRS_PORT1_END : PRS_PORT0_END);
5587 /* Port TSDM comes here */
5588 /* Port CSDM comes here */
5589 /* Port USDM comes here */
5590 /* Port XSDM comes here */
5591 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
5592 func ? TSEM_PORT1_END : TSEM_PORT0_END);
5593 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
5594 func ? USEM_PORT1_END : USEM_PORT0_END);
5595 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
5596 func ? CSEM_PORT1_END : CSEM_PORT0_END);
5597 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
5598 func ? XSEM_PORT1_END : XSEM_PORT0_END);
5599 /* Port UPB comes here */
5600	/* Port XPB comes here */
5601 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
5602 func ? PBF_PORT1_END : PBF_PORT0_END);
5603
5604	/* configure PBF to work without PAUSE, MTU 9000 */
5605 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
5606
5607 /* update threshold */
5608 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
5609 /* update init credit */
5610 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
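/* Editorial note (assumption, not from the original source): 9040/16
 * appears to be the 9000-byte MTU plus header overhead expressed in
 * 16-byte credit units, with the extra 553 - 22 units presumably
 * covering internal pipeline latency.
 */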
5611
5612 /* probe changes */
5613 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
5614 msleep(5);
5615 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
5616
5617#ifdef BCM_ISCSI
5618 /* tell the searcher where the T2 table is */
5619 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5620
5621 wb_write[0] = U64_LO(bp->t2_mapping);
5622 wb_write[1] = U64_HI(bp->t2_mapping);
5623 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5624 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5625 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5626 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5627
5628 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5629 /* Port SRCH comes here */
5630#endif
5631 /* Port CDU comes here */
5632 /* Port CFC comes here */
5633 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
5634 func ? HC_PORT1_END : HC_PORT0_END);
5635 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
5636 MISC_AEU_PORT0_START,
5637 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5638 /* Port PXPCS comes here */
5639 /* Port EMAC0 comes here */
5640 /* Port EMAC1 comes here */
5641 /* Port DBU comes here */
5642 /* Port DBG comes here */
5643 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
5644 func ? NIG_PORT1_END : NIG_PORT0_END);
5645 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
5646 /* Port MCP comes here */
5647 /* Port DMAE comes here */
5648
5649 bnx2x_link_reset(bp);
5650
5651	/* Reset PCIE errors for debug */
5652 REG_WR(bp, 0x2114, 0xffffffff);
5653 REG_WR(bp, 0x2120, 0xffffffff);
5654 REG_WR(bp, 0x2814, 0xffffffff);
5655
5656 /* !!! move to init_values.h */
5657 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5658 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5659 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5660 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5661
5662 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
5663 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
5664 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5665 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
5666
5667 bnx2x_gunzip_end(bp);
5668
5669 if (!nomcp) {
5670 port = bp->port;
5671
5672 bp->fw_drv_pulse_wr_seq =
5673 (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) &
5674 DRV_PULSE_SEQ_MASK);
5675 bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param);
5676 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
5677 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
5678 } else {
5679 bp->fw_mb = 0;
5680 }
5681
5682 return 0;
5683}
5684
c14423fe 5685/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5686static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687{
5688 u32 rc = 0;
5689 u32 seq = ++bp->fw_seq;
5690 int port = bp->port;
5691
5692 SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq);
5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq);
5694
5695	/* let the FW do its magic ... */
5696 msleep(100); /* TBD */
5697
5698 if (CHIP_REV_IS_SLOW(bp))
5699 msleep(900);
5700
5701 rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header);
5702
5703 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5704
5705 /* is this a reply to our command? */
5706 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5707 rc &= FW_MSG_CODE_MASK;
5708 } else {
5709 /* FW BUG! */
5710 BNX2X_ERR("FW failed to respond!\n");
5711 bnx2x_fw_dump(bp);
5712 rc = 0;
5713 }
5714 return rc;
5715}
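/* Editorial usage sketch (mirrors the real calls made during load and
 * unload further below):
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED)
 *		return -EBUSY;
 *
 * The sequence number OR'ed into the command is echoed back in the
 * response header; a mismatch means the reply is stale (or the FW hung).
 */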
5716
5717static void bnx2x_free_mem(struct bnx2x *bp)
5718{
5719
5720#define BNX2X_PCI_FREE(x, y, size) \
5721 do { \
5722 if (x) { \
5723 pci_free_consistent(bp->pdev, size, x, y); \
5724 x = NULL; \
5725 y = 0; \
5726 } \
5727 } while (0)
5728
5729#define BNX2X_FREE(x) \
5730 do { \
5731 if (x) { \
5732 vfree(x); \
5733 x = NULL; \
5734 } \
5735 } while (0)
5736
5737 int i;
5738
5739 /* fastpath */
5740 for_each_queue(bp, i) {
5741
5742 /* Status blocks */
5743 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5744 bnx2x_fp(bp, i, status_blk_mapping),
5745 sizeof(struct host_status_block) +
5746 sizeof(struct eth_tx_db_data));
5747
5748 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5749 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5751 bnx2x_fp(bp, i, tx_desc_mapping),
5752 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5753
5754 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5756 bnx2x_fp(bp, i, rx_desc_mapping),
5757 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5758
5759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5760 bnx2x_fp(bp, i, rx_comp_mapping),
5761 sizeof(struct eth_fast_path_rx_cqe) *
5762 NUM_RCQ_BD);
5763 }
5764
5765 BNX2X_FREE(bp->fp);
5766
5767 /* end of fastpath */
5768
5769 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5770 (sizeof(struct host_def_status_block)));
5771
5772 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5773 (sizeof(struct bnx2x_slowpath)));
5774
5775#ifdef BCM_ISCSI
5776 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5778 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5779 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5780#endif
5781 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
5782
5783#undef BNX2X_PCI_FREE
5784#undef BNX2X_FREE
5785}
5786
5787static int bnx2x_alloc_mem(struct bnx2x *bp)
5788{
5789
5790#define BNX2X_PCI_ALLOC(x, y, size) \
5791 do { \
5792 x = pci_alloc_consistent(bp->pdev, size, y); \
5793 if (x == NULL) \
5794 goto alloc_mem_err; \
5795 memset(x, 0, size); \
5796 } while (0)
5797
5798#define BNX2X_ALLOC(x, size) \
5799 do { \
5800 x = vmalloc(size); \
5801 if (x == NULL) \
5802 goto alloc_mem_err; \
5803 memset(x, 0, size); \
5804 } while (0)
5805
5806 int i;
5807
5808 /* fastpath */
5809 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
5810
5811 for_each_queue(bp, i) {
5812 bnx2x_fp(bp, i, bp) = bp;
5813
5814 /* Status blocks */
5815 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5816 &bnx2x_fp(bp, i, status_blk_mapping),
5817 sizeof(struct host_status_block) +
5818 sizeof(struct eth_tx_db_data));
5819
5820 bnx2x_fp(bp, i, hw_tx_prods) =
5821 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5822
5823 bnx2x_fp(bp, i, tx_prods_mapping) =
5824 bnx2x_fp(bp, i, status_blk_mapping) +
5825 sizeof(struct host_status_block);
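/* Editorial note on the shared allocation above: the host_status_block
 * sits at offset 0 and the eth_tx_db_data area holding the hardware Tx
 * producers follows it immediately, so both live in one coherent DMA
 * mapping and the two pointers/mappings are derived from it.
 */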
5826
5827 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5828 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5829 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5830 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5831 &bnx2x_fp(bp, i, tx_desc_mapping),
5832 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5833
5834 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5835 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5836 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5837 &bnx2x_fp(bp, i, rx_desc_mapping),
5838 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5839
5840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5841 &bnx2x_fp(bp, i, rx_comp_mapping),
5842 sizeof(struct eth_fast_path_rx_cqe) *
5843 NUM_RCQ_BD);
5844
5845 }
5846 /* end of fastpath */
5847
5848 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849 sizeof(struct host_def_status_block));
5850
5851 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852 sizeof(struct bnx2x_slowpath));
5853
5854#ifdef BCM_ISCSI
5855 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5856
5857 /* Initialize T1 */
5858 for (i = 0; i < 64*1024; i += 64) {
5859 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5861 }
5862
5863	/* allocate searcher T2 table
5864	   we allocate 1/4 of the T1 allocation for T2
5865	   (which is not entered into the ILT) */
5866 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5867
5868 /* Initialize T2 */
5869 for (i = 0; i < 16*1024; i += 64)
5870 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871
5872	/* now fixup the last line in the block to point to the next block */
5873 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
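/* Editorial note: the loop above appears to build a singly linked free
 * list - the element at offset i stores the physical address of the
 * element at i + 64 in its last 8 bytes, and the fixup above makes the
 * final element wrap back to t2_mapping, closing the list.
 */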
5874
5875	/* Timer block array (MAX_CONN*8 bytes), phys uncached; for now 1024 conns */
5876 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5877
5878 /* QM queues (128*MAX_CONN) */
5879 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5880#endif
5881
5882 /* Slow path ring */
5883 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5884
5885 return 0;
5886
5887alloc_mem_err:
5888 bnx2x_free_mem(bp);
5889 return -ENOMEM;
5890
5891#undef BNX2X_PCI_ALLOC
5892#undef BNX2X_ALLOC
5893}
5894
5895static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5896{
5897 int i;
5898
5899 for_each_queue(bp, i) {
5900 struct bnx2x_fastpath *fp = &bp->fp[i];
5901
5902 u16 bd_cons = fp->tx_bd_cons;
5903 u16 sw_prod = fp->tx_pkt_prod;
5904 u16 sw_cons = fp->tx_pkt_cons;
5905
5906 BUG_TRAP(fp->tx_buf_ring != NULL);
5907
5908 while (sw_cons != sw_prod) {
5909 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5910 sw_cons++;
5911 }
5912 }
5913}
5914
5915static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5916{
5917 int i, j;
5918
5919 for_each_queue(bp, j) {
5920 struct bnx2x_fastpath *fp = &bp->fp[j];
5921
5922 BUG_TRAP(fp->rx_buf_ring != NULL);
5923
5924 for (i = 0; i < NUM_RX_BD; i++) {
5925 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5926 struct sk_buff *skb = rx_buf->skb;
5927
5928 if (skb == NULL)
5929 continue;
5930
5931 pci_unmap_single(bp->pdev,
5932 pci_unmap_addr(rx_buf, mapping),
5933 bp->rx_buf_use_size,
5934 PCI_DMA_FROMDEVICE);
5935
5936 rx_buf->skb = NULL;
5937 dev_kfree_skb(skb);
5938 }
5939 }
5940}
5941
5942static void bnx2x_free_skbs(struct bnx2x *bp)
5943{
5944 bnx2x_free_tx_skbs(bp);
5945 bnx2x_free_rx_skbs(bp);
5946}
5947
5948static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5949{
5950 int i;
5951
5952 free_irq(bp->msix_table[0].vector, bp->dev);
5953	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954 bp->msix_table[0].vector);
5955
5956 for_each_queue(bp, i) {
5957		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5958 "state(%x)\n", i, bp->msix_table[i + 1].vector,
5959 bnx2x_fp(bp, i, state));
5960
5961 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
5962
5963 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
5964 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
5965
5966 } else
5967 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
5968
5969 }
5970
5971}
5972
5973static void bnx2x_free_irq(struct bnx2x *bp)
5974{
5975
5976 if (bp->flags & USING_MSIX_FLAG) {
5977
5978 bnx2x_free_msix_irqs(bp);
5979 pci_disable_msix(bp->pdev);
5980
5981 bp->flags &= ~USING_MSIX_FLAG;
5982
5983 } else
5984 free_irq(bp->pdev->irq, bp->dev);
5985}
5986
5987static int bnx2x_enable_msix(struct bnx2x *bp)
5988{
5989
5990 int i;
5991
5992 bp->msix_table[0].entry = 0;
5993 for_each_queue(bp, i)
5994 bp->msix_table[i + 1].entry = i + 1;
5995
5996 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997			     bp->num_queues + 1)) {
5998 BNX2X_ERR("failed to enable msix\n");
5999 return -1;
6000
6001 }
6002
6003 bp->flags |= USING_MSIX_FLAG;
6004
6005 return 0;
6006
6007}
6008
6009
6010static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6011{
6012
6013 int i, rc;
6014
6015 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6016
6017 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6018 bp->dev->name, bp->dev);
6019
6020 if (rc) {
6021 BNX2X_ERR("request sp irq failed\n");
6022 return -EBUSY;
6023 }
6024
6025 for_each_queue(bp, i) {
6026 rc = request_irq(bp->msix_table[i + 1].vector,
6027 bnx2x_msix_fp_int, 0,
6028 bp->dev->name, &bp->fp[i]);
6029
6030 if (rc) {
6031 BNX2X_ERR("request fp #%d irq failed\n", i);
6032 bnx2x_free_msix_irqs(bp);
6033 return -EBUSY;
6034 }
6035
6036 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6037
6038 }
6039
6040 return 0;
6041
6042}
6043
6044static int bnx2x_req_irq(struct bnx2x *bp)
6045{
6046
6047 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6048 IRQF_SHARED, bp->dev->name, bp->dev);
6049 if (!rc)
6050 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6051
6052 return rc;
6053
6054}
6055
6056/*
6057 * Init service functions
6058 */
6059
6060static void bnx2x_set_mac_addr(struct bnx2x *bp)
6061{
6062 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6063
6064 /* CAM allocation
6065 * unicasts 0-31:port0 32-63:port1
6066 * multicast 64-127:port0 128-191:port1
6067 */
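	/* Editorial example: with the header set up below, the two entries
	 * written here land in the first two slots of this port's window:
	 * config_table[0] carries the primary MAC and config_table[1] the
	 * broadcast filter.
	 */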
6068 config->hdr.length_6b = 2;
6069 config->hdr.offset = bp->port ? 31 : 0;
6070 config->hdr.reserved0 = 0;
6071 config->hdr.reserved1 = 0;
6072
6073 /* primary MAC */
6074 config->config_table[0].cam_entry.msb_mac_addr =
6075 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6076 config->config_table[0].cam_entry.middle_mac_addr =
6077 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6078 config->config_table[0].cam_entry.lsb_mac_addr =
6079 swab16(*(u16 *)&bp->dev->dev_addr[4]);
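	/* Editorial example (on a little-endian host): for MAC
	 * 00:50:c2:2c:71:0d the three halves above come out as msb 0x0050,
	 * middle 0xc22c and lsb 0x710d - swab16() undoes the little-endian
	 * u16 load so the bytes stay in address order.
	 */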
6080 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6081 config->config_table[0].target_table_entry.flags = 0;
6082 config->config_table[0].target_table_entry.client_id = 0;
6083 config->config_table[0].target_table_entry.vlan_id = 0;
6084
6085 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6086 config->config_table[0].cam_entry.msb_mac_addr,
6087 config->config_table[0].cam_entry.middle_mac_addr,
6088 config->config_table[0].cam_entry.lsb_mac_addr);
6089
6090 /* broadcast */
6091 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6092 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6093 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6094 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6095 config->config_table[1].target_table_entry.flags =
6096 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6097 config->config_table[1].target_table_entry.client_id = 0;
6098 config->config_table[1].target_table_entry.vlan_id = 0;
6099
6100 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6101 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6102 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6103}
6104
6105static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 int *state_p, int poll)
6107{
6108 /* can take a while if any port is running */
6109 int timeout = 500;
6110
6111 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6112 poll ? "polling" : "waiting", state, idx);
6113
6114 might_sleep();
6115
6116 while (timeout) {
6117
6118 if (poll) {
6119 bnx2x_rx_int(bp->fp, 10);
6120			/* If the index is different from 0,
6121			 * the reply for some commands will
6122			 * be on a non-default queue
6123			 */
6124 if (idx)
6125 bnx2x_rx_int(&bp->fp[idx], 10);
6126 }
6127
6128 mb(); /* state is changed by bnx2x_sp_event()*/
6129
6130 if (*state_p != state)
6131 return 0;
6132
6133 timeout--;
6134 msleep(1);
6135
6136 }
6137
6138 /* timeout! */
6139 BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx);
6140 return -EBUSY;
6141
6142}
6143
6144static int bnx2x_setup_leading(struct bnx2x *bp)
6145{
6146
6147	/* reset IGU state */
6148 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6149
6150 /* SETUP ramrod */
6151 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6152
6153 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6154
6155}
6156
6157static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6158{
6159
6160 /* reset IGU state */
6161 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6162
6163 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6164 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6165
6166 /* Wait for completion */
6167 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6168 &(bp->fp[index].state), 1);
6169
6170}
6171
6172
6173static int bnx2x_poll(struct napi_struct *napi, int budget);
6174static void bnx2x_set_rx_mode(struct net_device *dev);
6175
6176static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6177{
6178 int rc;
6179 int i = 0;
6180
6181 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6182
6183	/* Send LOAD_REQUEST command to the MCP.
6184	   The response indicates the type of LOAD: if this is the
6185	   first port to be initialized, the common blocks should be
6186	   initialized as well; otherwise they should not.
6187	 */
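	/* Responses handled here (per the checks below):
	 * FW_MSG_CODE_DRV_LOAD_COMMON - first function up, init common blocks;
	 * FW_MSG_CODE_DRV_LOAD_REFUSED - the other port is in diagnostic mode.
	 */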
6188 if (!nomcp) {
6189 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6190 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6191 return -EBUSY; /* other port in diagnostic mode */
6192 }
6193 } else {
6194 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
6195 }
6196
6197	/* if we can't use MSI-X we only need one fastpath,
6198	 * so try to enable MSI-X with the requested number of fastpaths
6199	 * and fall back to INT#A with a single fastpath
6200	 */
6201 if (req_irq) {
6202 if (use_inta) {
6203 bp->num_queues = 1;
6204 } else {
6205			if ((use_multi > 1) && (use_multi <= 16))
6206 /* user requested number */
6207 bp->num_queues = use_multi;
6208 else if (use_multi == 1)
6209 bp->num_queues = num_online_cpus();
6210 else
6211 bp->num_queues = 1;
6212
6213 if (bnx2x_enable_msix(bp)) {
6214				/* failed to enable msix */
6215 bp->num_queues = 1;
6216 if (use_multi)
6217					BNX2X_ERR("Multi requested but failed"
6218 " to enable MSI-X\n");
6219 }
6220 }
6221 }
6222
6223 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
6224
6225 if (bnx2x_alloc_mem(bp))
6226 return -ENOMEM;
6227
6228 if (req_irq) {
6229 if (bp->flags & USING_MSIX_FLAG) {
6230 if (bnx2x_req_msix_irqs(bp)) {
6231 pci_disable_msix(bp->pdev);
6232 goto out_error;
6233 }
6234
6235 } else {
6236 if (bnx2x_req_irq(bp)) {
6237 BNX2X_ERR("IRQ request failed, aborting\n");
6238 goto out_error;
6239 }
6240 }
6241 }
6242
6243 for_each_queue(bp, i)
6244 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6245 bnx2x_poll, 128);
6246
6247
6248 /* Initialize HW */
6249 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
6250 BNX2X_ERR("HW init failed, aborting\n");
6251 goto out_error;
6252 }
6253
6254
6255 atomic_set(&bp->intr_sem, 0);
6256
6257
6258 /* Setup NIC internals and enable interrupts */
6259 bnx2x_nic_init(bp);
6260
6261 /* Send LOAD_DONE command to MCP */
6262 if (!nomcp) {
6263 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6264 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
6265 if (!rc) {
6266 BNX2X_ERR("MCP response failure, unloading\n");
6267 goto int_disable;
6268 }
6269 }
6270
6271 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6272
6273 /* Enable Rx interrupt handling before sending the ramrod
6274 as it's completed on Rx FP queue */
6275 for_each_queue(bp, i)
6276 napi_enable(&bnx2x_fp(bp, i, napi));
6277
6278 if (bnx2x_setup_leading(bp))
6279 goto stop_netif;
6280
6281 for_each_nondefault_queue(bp, i)
6282 if (bnx2x_setup_multi(bp, i))
6283 goto stop_netif;
6284
6285 bnx2x_set_mac_addr(bp);
6286
6287 bnx2x_phy_init(bp);
6288
6289 /* Start fast path */
6290 if (req_irq) { /* IRQ is only requested from bnx2x_open */
6291 netif_start_queue(bp->dev);
6292 if (bp->flags & USING_MSIX_FLAG)
6293 printk(KERN_INFO PFX "%s: using MSI-X\n",
6294 bp->dev->name);
6295
6296	/* Otherwise the Tx queue should only be re-enabled */
6297 } else if (netif_running(bp->dev)) {
6298 netif_wake_queue(bp->dev);
6299 bnx2x_set_rx_mode(bp->dev);
6300 }
6301
6302 /* start the timer */
6303 mod_timer(&bp->timer, jiffies + bp->current_interval);
6304
6305 return 0;
6306
6307stop_netif:
6308 for_each_queue(bp, i)
6309 napi_disable(&bnx2x_fp(bp, i, napi));
6310
6311int_disable:
6312 bnx2x_disable_int_sync(bp);
6313
6314 bnx2x_free_skbs(bp);
6315 bnx2x_free_irq(bp);
6316
6317out_error:
6318 bnx2x_free_mem(bp);
6319
6320 /* TBD we really need to reset the chip
6321 if we want to recover from this */
6322 return rc;
6323}
6324
6325static void bnx2x_netif_stop(struct bnx2x *bp)
6326{
6327 int i;
6328
6329 bp->rx_mode = BNX2X_RX_MODE_NONE;
6330 bnx2x_set_storm_rx_mode(bp);
6331
6332 bnx2x_disable_int_sync(bp);
6333 bnx2x_link_reset(bp);
6334
6335 for_each_queue(bp, i)
6336 napi_disable(&bnx2x_fp(bp, i, napi));
6337
6338 if (netif_running(bp->dev)) {
6339 netif_tx_disable(bp->dev);
6340 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6341 }
6342}
6343
6344static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6345{
6346 int port = bp->port;
6347#ifdef USE_DMAE
6348 u32 wb_write[2];
6349#endif
6350 int base, i;
6351
6352 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
6353
6354 /* Do not rcv packets to BRB */
6355 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6356 /* Do not direct rcv packets that are not for MCP to the BRB */
6357 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6358 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6359
6360 /* Configure IGU and AEU */
6361 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6362 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6363
6364 /* TODO: Close Doorbell port? */
6365
6366 /* Clear ILT */
6367#ifdef USE_DMAE
6368 wb_write[0] = 0;
6369 wb_write[1] = 0;
6370#endif
6371 base = port * RQ_ONCHIP_AT_PORT_SIZE;
6372 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
6373#ifdef USE_DMAE
6374 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6375#else
6376 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
6377 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
6378#endif
6379 }
6380
6381 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6382 /* reset_common */
6383 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6384 0xd3ffff7f);
6385 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6386 0x1403);
6387 }
6388}
6389
6390static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6391{
6392
6393 int rc;
6394
6395	/* halt the connection */
6396 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6398
6399
6400 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6401 &(bp->fp[index].state), 1);
6402	if (rc) /* timeout */
6403 return rc;
6404
6405 /* delete cfc entry */
6406 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6407
6408 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index,
6409 &(bp->fp[index].state), 1);
6410
6411}
6412
6413
6414static void bnx2x_stop_leading(struct bnx2x *bp)
6415{
6416
6417	/* if the other port is handling traffic,
6418 this can take a lot of time */
6419 int timeout = 500;
6420
6421 might_sleep();
6422
6423 /* Send HALT ramrod */
6424 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6425 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
6426
6427 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6428 &(bp->fp[0].state), 1))
6429 return;
6430
6431 bp->dsb_sp_prod_idx = *bp->dsb_sp_prod;
6432
6433 /* Send CFC_DELETE ramrod */
6434 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6435
6436	/*
6437	 * Wait for completion.
6438	 * We are going to reset the chip anyway,
6439	 * so there is not much to do if this times out.
6440	 */
6441 while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) {
6442 timeout--;
6443 msleep(1);
6444 }
6445
6446}
6447
6448static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
6449{
6450 u32 reset_code = 0;
6451 int rc;
6452 int i;
6453
6454 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6455
6456 /* Calling flush_scheduled_work() may deadlock because
6457 * linkwatch_event() may be on the workqueue and it will try to get
6458 * the rtnl_lock which we are holding.
6459 */
6460
6461 while (bp->in_reset_task)
6462 msleep(1);
6463
6464	/* Delete the timer: do it before disabling interrupts, as there
6465	   may still be a STAT_QUERY ramrod pending after stopping the timer */
6466 del_timer_sync(&bp->timer);
6467
6468 /* Wait until stat ramrod returns and all SP tasks complete */
6469 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
6470 msleep(1);
6471
6472 /* Stop fast path, disable MAC, disable interrupts, disable napi */
6473 bnx2x_netif_stop(bp);
6474
6475 if (bp->flags & NO_WOL_FLAG)
6476 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6477 else if (bp->wol) {
6478 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
6479 u8 *mac_addr = bp->dev->dev_addr;
6480 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
6481 EMAC_MODE_ACPI_RCVD);
6482
6483 EMAC_WR(EMAC_REG_EMAC_MODE, val);
6484
6485 val = (mac_addr[0] << 8) | mac_addr[1];
6486 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
6487
6488 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6489 (mac_addr[4] << 8) | mac_addr[5];
6490 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
6491
6492 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6493 } else
6494 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6495
6496 for_each_nondefault_queue(bp, i)
6497 if (bnx2x_stop_multi(bp, i))
6498 goto error;
6499
6500
6501 bnx2x_stop_leading(bp);
6502
6503error:
6504 if (!nomcp)
6505 rc = bnx2x_fw_command(bp, reset_code);
6506 else
6507 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6508
6509 /* Release IRQs */
6510 if (fre_irq)
6511 bnx2x_free_irq(bp);
6512
6513 /* Reset the chip */
6514 bnx2x_reset_chip(bp, rc);
6515
6516 /* Report UNLOAD_DONE to MCP */
6517 if (!nomcp)
6518 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6519
6520 /* Free SKBs and driver internals */
6521 bnx2x_free_skbs(bp);
6522 bnx2x_free_mem(bp);
6523
6524 bp->state = BNX2X_STATE_CLOSED;
6525 /* Set link down */
6526 bp->link_up = 0;
6527 netif_carrier_off(bp->dev);
6528
6529 return 0;
6530}
6531
6532/* end of nic load/unload */
6533
6534/* ethtool_ops */
6535
6536/*
6537 * Init service functions
6538 */
6539
6540static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6541{
6542 int port = bp->port;
6543 u32 ext_phy_type;
6544
6545 bp->phy_flags = 0;
6546
6547 switch (switch_cfg) {
6548 case SWITCH_CFG_1G:
6549 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6550
6551 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
6552 switch (ext_phy_type) {
6553 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6554 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6555 ext_phy_type);
6556
6557 bp->supported |= (SUPPORTED_10baseT_Half |
6558 SUPPORTED_10baseT_Full |
6559 SUPPORTED_100baseT_Half |
6560 SUPPORTED_100baseT_Full |
6561 SUPPORTED_1000baseT_Full |
6562 SUPPORTED_2500baseT_Full |
6563 SUPPORTED_TP | SUPPORTED_FIBRE |
6564 SUPPORTED_Autoneg |
6565 SUPPORTED_Pause |
6566 SUPPORTED_Asym_Pause);
6567 break;
6568
6569 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6570 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6571 ext_phy_type);
6572
6573 bp->phy_flags |= PHY_SGMII_FLAG;
6574
6575 bp->supported |= (/* SUPPORTED_10baseT_Half |
6576 SUPPORTED_10baseT_Full |
6577 SUPPORTED_100baseT_Half |
6578 SUPPORTED_100baseT_Full |*/
6579 SUPPORTED_1000baseT_Full |
6580 SUPPORTED_TP | SUPPORTED_FIBRE |
6581 SUPPORTED_Autoneg |
6582 SUPPORTED_Pause |
6583 SUPPORTED_Asym_Pause);
6584 break;
6585
6586 default:
6587 BNX2X_ERR("NVRAM config error. "
6588 "BAD SerDes ext_phy_config 0x%x\n",
6589 bp->ext_phy_config);
6590 return;
6591 }
6592
6593 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6594 port*0x10);
6595 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
6596 break;
6597
6598 case SWITCH_CFG_10G:
6599 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6600
6601 bp->phy_flags |= PHY_XGXS_FLAG;
6602
6603 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6604 switch (ext_phy_type) {
6605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6606 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6607 ext_phy_type);
6608
6609 bp->supported |= (SUPPORTED_10baseT_Half |
6610 SUPPORTED_10baseT_Full |
6611 SUPPORTED_100baseT_Half |
6612 SUPPORTED_100baseT_Full |
6613 SUPPORTED_1000baseT_Full |
6614 SUPPORTED_2500baseT_Full |
6615 SUPPORTED_10000baseT_Full |
6616 SUPPORTED_TP | SUPPORTED_FIBRE |
6617 SUPPORTED_Autoneg |
6618 SUPPORTED_Pause |
6619 SUPPORTED_Asym_Pause);
6620 break;
6621
6622 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6623 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6624 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n",
6625 ext_phy_type);
6626
6627 bp->supported |= (SUPPORTED_10000baseT_Full |
6628 SUPPORTED_FIBRE |
6629 SUPPORTED_Pause |
6630 SUPPORTED_Asym_Pause);
6631 break;
6632
6633 default:
6634 BNX2X_ERR("NVRAM config error. "
6635 "BAD XGXS ext_phy_config 0x%x\n",
6636 bp->ext_phy_config);
6637 return;
6638 }
6639
6640 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6641 port*0x18);
6642 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
6643
6644 bp->ser_lane = ((bp->lane_config &
6645 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
6646 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
6647 bp->rx_lane_swap = ((bp->lane_config &
6648 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
6649 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
6650 bp->tx_lane_swap = ((bp->lane_config &
6651 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
6652 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
6653 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
6654 bp->rx_lane_swap, bp->tx_lane_swap);
6655 break;
6656
6657 default:
6658 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6659 bp->link_config);
6660 return;
6661 }
6662
6663 /* mask what we support according to speed_cap_mask */
6664 if (!(bp->speed_cap_mask &
6665 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6666 bp->supported &= ~SUPPORTED_10baseT_Half;
6667
6668 if (!(bp->speed_cap_mask &
6669 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6670 bp->supported &= ~SUPPORTED_10baseT_Full;
6671
6672 if (!(bp->speed_cap_mask &
6673 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6674 bp->supported &= ~SUPPORTED_100baseT_Half;
6675
6676 if (!(bp->speed_cap_mask &
6677 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6678 bp->supported &= ~SUPPORTED_100baseT_Full;
6679
6680 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6681 bp->supported &= ~(SUPPORTED_1000baseT_Half |
6682 SUPPORTED_1000baseT_Full);
6683
6684 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6685 bp->supported &= ~SUPPORTED_2500baseT_Full;
6686
6687 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6688 bp->supported &= ~SUPPORTED_10000baseT_Full;
6689
6690 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
6691}
6692
6693static void bnx2x_link_settings_requested(struct bnx2x *bp)
6694{
6695 bp->req_autoneg = 0;
6696 bp->req_duplex = DUPLEX_FULL;
6697
6698 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6699 case PORT_FEATURE_LINK_SPEED_AUTO:
6700 if (bp->supported & SUPPORTED_Autoneg) {
6701 bp->req_autoneg |= AUTONEG_SPEED;
6702 bp->req_line_speed = 0;
6703 bp->advertising = bp->supported;
6704 } else {
6705 u32 ext_phy_type;
6706
6707 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6708 if ((ext_phy_type ==
6709 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6710 (ext_phy_type ==
6711 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6712 /* force 10G, no AN */
6713 bp->req_line_speed = SPEED_10000;
6714 bp->advertising =
6715 (ADVERTISED_10000baseT_Full |
6716 ADVERTISED_FIBRE);
6717 break;
6718 }
6719 BNX2X_ERR("NVRAM config error. "
6720 "Invalid link_config 0x%x"
6721 " Autoneg not supported\n",
6722 bp->link_config);
6723 return;
6724 }
6725 break;
6726
6727 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6728 if (bp->speed_cap_mask &
6729 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
6730 bp->req_line_speed = SPEED_10;
6731 bp->advertising = (ADVERTISED_10baseT_Full |
6732 ADVERTISED_TP);
6733 } else {
6734 BNX2X_ERR("NVRAM config error. "
6735 "Invalid link_config 0x%x"
6736 " speed_cap_mask 0x%x\n",
6737 bp->link_config, bp->speed_cap_mask);
6738 return;
6739 }
6740 break;
6741
6742 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6743 if (bp->speed_cap_mask &
6744 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
6745 bp->req_line_speed = SPEED_10;
6746 bp->req_duplex = DUPLEX_HALF;
6747 bp->advertising = (ADVERTISED_10baseT_Half |
6748 ADVERTISED_TP);
6749 } else {
6750 BNX2X_ERR("NVRAM config error. "
6751 "Invalid link_config 0x%x"
6752 " speed_cap_mask 0x%x\n",
6753 bp->link_config, bp->speed_cap_mask);
6754 return;
6755 }
6756 break;
6757
6758 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6759 if (bp->speed_cap_mask &
6760 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
6761 bp->req_line_speed = SPEED_100;
6762 bp->advertising = (ADVERTISED_100baseT_Full |
6763 ADVERTISED_TP);
6764 } else {
6765 BNX2X_ERR("NVRAM config error. "
6766 "Invalid link_config 0x%x"
6767 " speed_cap_mask 0x%x\n",
6768 bp->link_config, bp->speed_cap_mask);
6769 return;
6770 }
6771 break;
6772
6773 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6774 if (bp->speed_cap_mask &
6775 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
6776 bp->req_line_speed = SPEED_100;
6777 bp->req_duplex = DUPLEX_HALF;
6778 bp->advertising = (ADVERTISED_100baseT_Half |
6779 ADVERTISED_TP);
6780 } else {
6781 BNX2X_ERR("NVRAM config error. "
6782 "Invalid link_config 0x%x"
6783 " speed_cap_mask 0x%x\n",
6784 bp->link_config, bp->speed_cap_mask);
6785 return;
6786 }
6787 break;
6788
6789 case PORT_FEATURE_LINK_SPEED_1G:
6790 if (bp->speed_cap_mask &
6791 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
6792 bp->req_line_speed = SPEED_1000;
6793 bp->advertising = (ADVERTISED_1000baseT_Full |
6794 ADVERTISED_TP);
6795 } else {
6796 BNX2X_ERR("NVRAM config error. "
6797 "Invalid link_config 0x%x"
6798 " speed_cap_mask 0x%x\n",
6799 bp->link_config, bp->speed_cap_mask);
6800 return;
6801 }
6802 break;
6803
6804 case PORT_FEATURE_LINK_SPEED_2_5G:
6805 if (bp->speed_cap_mask &
6806 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
6807 bp->req_line_speed = SPEED_2500;
6808 bp->advertising = (ADVERTISED_2500baseT_Full |
6809 ADVERTISED_TP);
6810 } else {
6811 BNX2X_ERR("NVRAM config error. "
6812 "Invalid link_config 0x%x"
6813 " speed_cap_mask 0x%x\n",
6814 bp->link_config, bp->speed_cap_mask);
6815 return;
6816 }
6817 break;
6818
6819 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6820 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6821 case PORT_FEATURE_LINK_SPEED_10G_KR:
6822 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
6823 BNX2X_ERR("NVRAM config error. "
6824 "Invalid link_config 0x%x"
6825 " phy_flags 0x%x\n",
6826 bp->link_config, bp->phy_flags);
6827 return;
6828 }
6829 if (bp->speed_cap_mask &
6830 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
6831 bp->req_line_speed = SPEED_10000;
6832 bp->advertising = (ADVERTISED_10000baseT_Full |
6833 ADVERTISED_FIBRE);
6834 } else {
6835 BNX2X_ERR("NVRAM config error. "
6836 "Invalid link_config 0x%x"
6837 " speed_cap_mask 0x%x\n",
6838 bp->link_config, bp->speed_cap_mask);
6839 return;
6840 }
6841 break;
6842
6843 default:
6844 BNX2X_ERR("NVRAM config error. "
6845 "BAD link speed link_config 0x%x\n",
6846 bp->link_config);
6847 bp->req_autoneg |= AUTONEG_SPEED;
6848 bp->req_line_speed = 0;
6849 bp->advertising = bp->supported;
6850 break;
6851 }
6852 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
6853 bp->req_line_speed, bp->req_duplex);
6854
6855 bp->req_flow_ctrl = (bp->link_config &
6856 PORT_FEATURE_FLOW_CONTROL_MASK);
6857 /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
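	/* Editorial paraphrase of that table: symmetric PAUSE resolves only
	 * when both partners advertise Pause; Tx-only (asymmetric) pause
	 * resolves when the local side advertises Asym_Pause and the link
	 * partner advertises both Pause and Asym_Pause.
	 */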
6858 switch (bp->req_flow_ctrl) {
6859 case FLOW_CTRL_AUTO:
6860 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
6861 if (bp->dev->mtu <= 4500) {
6862 bp->pause_mode = PAUSE_BOTH;
6863 bp->advertising |= (ADVERTISED_Pause |
6864 ADVERTISED_Asym_Pause);
6865 } else {
6866 bp->pause_mode = PAUSE_ASYMMETRIC;
6867 bp->advertising |= ADVERTISED_Asym_Pause;
6868 }
6869 break;
6870
6871 case FLOW_CTRL_TX:
6872 bp->pause_mode = PAUSE_ASYMMETRIC;
6873 bp->advertising |= ADVERTISED_Asym_Pause;
6874 break;
6875
6876 case FLOW_CTRL_RX:
6877 case FLOW_CTRL_BOTH:
6878 bp->pause_mode = PAUSE_BOTH;
6879 bp->advertising |= (ADVERTISED_Pause |
6880 ADVERTISED_Asym_Pause);
6881 break;
6882
6883 case FLOW_CTRL_NONE:
6884 default:
6885 bp->pause_mode = PAUSE_NONE;
6886 bp->advertising &= ~(ADVERTISED_Pause |
6887 ADVERTISED_Asym_Pause);
6888 break;
6889 }
6890 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x\n"
6891 KERN_INFO " pause_mode %d advertising 0x%x\n",
6892 bp->req_autoneg, bp->req_flow_ctrl,
6893 bp->pause_mode, bp->advertising);
6894}
6895
6896static void bnx2x_get_hwinfo(struct bnx2x *bp)
6897{
6898 u32 val, val2, val3, val4, id;
6899 int port = bp->port;
6900 u32 switch_cfg;
6901
6902 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6903 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
6904
6905 /* Get the chip revision id and number. */
6906 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6907 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6908 id = ((val & 0xffff) << 16);
6909 val = REG_RD(bp, MISC_REG_CHIP_REV);
6910 id |= ((val & 0xf) << 12);
6911 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6912 id |= ((val & 0xff) << 4);
6913	val = REG_RD(bp, MISC_REG_BOND_ID);
6914 id |= (val & 0xf);
6915 bp->chip_id = id;
6916 BNX2X_DEV_INFO("chip ID is %x\n", id);
6917
6918 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
6919 BNX2X_DEV_INFO("MCP not active\n");
6920 nomcp = 1;
6921 goto set_mac;
6922 }
6923
6924 val = SHMEM_RD(bp, validity_map[port]);
6925 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6926	    != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6927 BNX2X_ERR("MCP validity signature bad\n");
6928
6929 bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) &
6930 DRV_MSG_SEQ_NUMBER_MASK);
6931
6932 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6933
6934 bp->serdes_config =
6935 SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config);
6936 bp->lane_config =
6937 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6938 bp->ext_phy_config =
6939 SHMEM_RD(bp,
6940 dev_info.port_hw_config[port].external_phy_config);
6941 bp->speed_cap_mask =
6942 SHMEM_RD(bp,
6943 dev_info.port_hw_config[port].speed_capability_mask);
6944
6945 bp->link_config =
6946 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6947
6948 BNX2X_DEV_INFO("hw_config (%08x) serdes_config (%08x)\n"
6949 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
6950 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
6951 " fw_seq (%08x)\n",
6952 bp->hw_config, bp->serdes_config, bp->lane_config,
6953 bp->ext_phy_config, bp->speed_cap_mask,
6954 bp->link_config, bp->fw_seq);
6955
6956 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
6957 bnx2x_link_settings_supported(bp, switch_cfg);
6958
6959 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
6960 /* for now disable cl73 */
6961 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
6962 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
6963
6964 bnx2x_link_settings_requested(bp);
6965
6966 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6967 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6968 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6969 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6970 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6971 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6972 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6973 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6974
6975 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
6976
6977
6978 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6979 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6980 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6981 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6982
6983 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6984 val, val2, val3, val4);
6985
6986 /* bc ver */
6987 if (!nomcp) {
6988 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
6989 BNX2X_DEV_INFO("bc_ver %X\n", val);
6990 if (val < BNX2X_BC_VER) {
6991 /* for now only warn
6992 * later we might need to enforce this */
6993 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6994 " please upgrade BC\n", BNX2X_BC_VER, val);
6995 }
6996 } else {
6997 bp->bc_ver = 0;
6998 }
6999
7000 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7001 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7002 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7003 bp->flash_size, bp->flash_size);
7004
7005 return;
7006
7007set_mac: /* only supposed to happen on emulation/FPGA */
7008 BNX2X_ERR("warning constant MAC workaround active\n");
7009 bp->dev->dev_addr[0] = 0;
7010 bp->dev->dev_addr[1] = 0x50;
7011 bp->dev->dev_addr[2] = 0xc2;
7012 bp->dev->dev_addr[3] = 0x2c;
7013 bp->dev->dev_addr[4] = 0x71;
7014 bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
7015
7016 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7017
7018}
7019
7020/*
7021 * ethtool service functions
7022 */
7023
7024/* All ethtool functions called with rtnl_lock */
7025
7026static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7027{
7028 struct bnx2x *bp = netdev_priv(dev);
7029
7030 cmd->supported = bp->supported;
7031 cmd->advertising = bp->advertising;
7032
7033 if (netif_carrier_ok(dev)) {
7034 cmd->speed = bp->line_speed;
7035 cmd->duplex = bp->duplex;
7036 } else {
7037 cmd->speed = bp->req_line_speed;
7038 cmd->duplex = bp->req_duplex;
7039 }
7040
7041 if (bp->phy_flags & PHY_XGXS_FLAG) {
7042 cmd->port = PORT_FIBRE;
7043 } else {
7044 cmd->port = PORT_TP;
7045 }
7046
7047 cmd->phy_address = bp->phy_addr;
7048 cmd->transceiver = XCVR_INTERNAL;
7049
7050 if (bp->req_autoneg & AUTONEG_SPEED) {
7051 cmd->autoneg = AUTONEG_ENABLE;
7052 } else {
7053 cmd->autoneg = AUTONEG_DISABLE;
7054 }
7055
7056 cmd->maxtxpkt = 0;
7057 cmd->maxrxpkt = 0;
7058
7059 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7060 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7061 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7062 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7063 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7064 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7065 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7066
7067 return 0;
7068}
7069
7070static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7071{
7072 struct bnx2x *bp = netdev_priv(dev);
7073 u32 advertising;
7074
7075 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7076 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7077 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7078 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7079 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7080 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7081 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7082
7083 switch (cmd->port) {
7084 case PORT_TP:
7085 if (!(bp->supported & SUPPORTED_TP))
7086 return -EINVAL;
7087
7088 if (bp->phy_flags & PHY_XGXS_FLAG) {
7089 bnx2x_link_reset(bp);
7090 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7091 bnx2x_phy_deassert(bp);
7092 }
7093 break;
7094
7095 case PORT_FIBRE:
7096 if (!(bp->supported & SUPPORTED_FIBRE))
7097 return -EINVAL;
7098
7099 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7100 bnx2x_link_reset(bp);
7101 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7102 bnx2x_phy_deassert(bp);
7103 }
7104 break;
7105
7106 default:
7107 return -EINVAL;
7108 }
7109
7110 if (cmd->autoneg == AUTONEG_ENABLE) {
7111 if (!(bp->supported & SUPPORTED_Autoneg))
7112 return -EINVAL;
7113
7114 /* advertise the requested speed and duplex if supported */
7115 cmd->advertising &= bp->supported;
7116
7117 bp->req_autoneg |= AUTONEG_SPEED;
7118 bp->req_line_speed = 0;
7119 bp->req_duplex = DUPLEX_FULL;
7120 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7121
7122 } else { /* forced speed */
7123 /* advertise the requested speed and duplex if supported */
7124 switch (cmd->speed) {
7125 case SPEED_10:
7126 if (cmd->duplex == DUPLEX_FULL) {
7127 if (!(bp->supported & SUPPORTED_10baseT_Full))
7128 return -EINVAL;
7129
7130 advertising = (ADVERTISED_10baseT_Full |
7131 ADVERTISED_TP);
7132 } else {
7133 if (!(bp->supported & SUPPORTED_10baseT_Half))
7134 return -EINVAL;
7135
7136 advertising = (ADVERTISED_10baseT_Half |
7137 ADVERTISED_TP);
7138 }
7139 break;
7140
7141 case SPEED_100:
7142 if (cmd->duplex == DUPLEX_FULL) {
7143 if (!(bp->supported &
7144 SUPPORTED_100baseT_Full))
7145 return -EINVAL;
7146
7147 advertising = (ADVERTISED_100baseT_Full |
7148 ADVERTISED_TP);
7149 } else {
7150 if (!(bp->supported &
7151 SUPPORTED_100baseT_Half))
7152 return -EINVAL;
7153
7154 advertising = (ADVERTISED_100baseT_Half |
7155 ADVERTISED_TP);
7156 }
7157 break;
7158
7159 case SPEED_1000:
7160 if (cmd->duplex != DUPLEX_FULL)
7161 return -EINVAL;
7162
7163 if (!(bp->supported & SUPPORTED_1000baseT_Full))
7164 return -EINVAL;
7165
7166 advertising = (ADVERTISED_1000baseT_Full |
7167 ADVERTISED_TP);
7168 break;
7169
7170 case SPEED_2500:
7171 if (cmd->duplex != DUPLEX_FULL)
7172 return -EINVAL;
7173
7174 if (!(bp->supported & SUPPORTED_2500baseT_Full))
7175 return -EINVAL;
7176
7177 advertising = (ADVERTISED_2500baseT_Full |
7178 ADVERTISED_TP);
7179 break;
7180
7181 case SPEED_10000:
7182 if (cmd->duplex != DUPLEX_FULL)
7183 return -EINVAL;
7184
7185 if (!(bp->supported & SUPPORTED_10000baseT_Full))
7186 return -EINVAL;
7187
7188 advertising = (ADVERTISED_10000baseT_Full |
7189 ADVERTISED_FIBRE);
7190 break;
7191
7192 default:
7193 return -EINVAL;
7194 }
7195
7196 bp->req_autoneg &= ~AUTONEG_SPEED;
7197 bp->req_line_speed = cmd->speed;
7198 bp->req_duplex = cmd->duplex;
7199 bp->advertising = advertising;
7200 }
7201
7202 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
7203 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7204 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
7205 bp->advertising);
7206
7207 bnx2x_stop_stats(bp);
7208 bnx2x_link_initialize(bp);
7209
7210 return 0;
7211}
7212
7213static void bnx2x_get_drvinfo(struct net_device *dev,
7214 struct ethtool_drvinfo *info)
7215{
7216 struct bnx2x *bp = netdev_priv(dev);
7217
7218 strcpy(info->driver, DRV_MODULE_NAME);
7219 strcpy(info->version, DRV_MODULE_VERSION);
7220 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
7221 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7222 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
7223 bp->bc_ver);
7224 strcpy(info->bus_info, pci_name(bp->pdev));
7225 info->n_stats = BNX2X_NUM_STATS;
7226 info->testinfo_len = BNX2X_NUM_TESTS;
7227 info->eedump_len = bp->flash_size;
7228 info->regdump_len = 0;
7229}
7230
7231static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7232{
7233 struct bnx2x *bp = netdev_priv(dev);
7234
7235 if (bp->flags & NO_WOL_FLAG) {
7236 wol->supported = 0;
7237 wol->wolopts = 0;
7238 } else {
7239 wol->supported = WAKE_MAGIC;
7240 if (bp->wol)
7241 wol->wolopts = WAKE_MAGIC;
7242 else
7243 wol->wolopts = 0;
7244 }
7245 memset(&wol->sopass, 0, sizeof(wol->sopass));
7246}
7247
7248static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7249{
7250 struct bnx2x *bp = netdev_priv(dev);
7251
7252 if (wol->wolopts & ~WAKE_MAGIC)
7253 return -EINVAL;
7254
7255 if (wol->wolopts & WAKE_MAGIC) {
7256 if (bp->flags & NO_WOL_FLAG)
7257 return -EINVAL;
7258
7259 bp->wol = 1;
7260 } else {
7261 bp->wol = 0;
7262 }
7263 return 0;
7264}
7265
7266static u32 bnx2x_get_msglevel(struct net_device *dev)
7267{
7268 struct bnx2x *bp = netdev_priv(dev);
7269
7270 return bp->msglevel;
7271}
7272
7273static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7274{
7275 struct bnx2x *bp = netdev_priv(dev);
7276
7277 if (capable(CAP_NET_ADMIN))
7278 bp->msglevel = level;
7279}
7280
7281static int bnx2x_nway_reset(struct net_device *dev)
7282{
7283 struct bnx2x *bp = netdev_priv(dev);
7284
7285 if (bp->state != BNX2X_STATE_OPEN) {
7286 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7287 return -EAGAIN;
7288 }
7289
7290 bnx2x_stop_stats(bp);
7291 bnx2x_link_initialize(bp);
7292
7293 return 0;
7294}
7295
7296static int bnx2x_get_eeprom_len(struct net_device *dev)
7297{
7298 struct bnx2x *bp = netdev_priv(dev);
7299
7300 return bp->flash_size;
7301}
7302
7303static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7304{
7305 int port = bp->port;
7306 int count, i;
7307 u32 val = 0;
7308
7309 /* adjust timeout for emulation/FPGA */
7310 count = NVRAM_TIMEOUT_COUNT;
7311 if (CHIP_REV_IS_SLOW(bp))
7312 count *= 100;
7313
7314 /* request access to nvram interface */
7315 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7316 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7317
7318 for (i = 0; i < count*10; i++) {
7319 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7320 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7321 break;
7322
7323 udelay(5);
7324 }
7325
7326 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7327 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
7328 return -EBUSY;
7329 }
7330
7331 return 0;
7332}
7333
7334static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7335{
7336 int port = bp->port;
7337 int count, i;
7338 u32 val = 0;
7339
7340 /* adjust timeout for emulation/FPGA */
7341 count = NVRAM_TIMEOUT_COUNT;
7342 if (CHIP_REV_IS_SLOW(bp))
7343 count *= 100;
7344
7345 /* relinquish nvram interface */
7346 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7347 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7348
7349 for (i = 0; i < count*10; i++) {
7350 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7351 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7352 break;
7353
7354 udelay(5);
7355 }
7356
7357 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7358 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
7359 return -EBUSY;
7360 }
7361
7362 return 0;
7363}
7364
7365static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7366{
7367 u32 val;
7368
7369 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7370
7371 /* enable both bits, even on read */
7372 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7373 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7374 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7375}
7376
7377static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7378{
7379 u32 val;
7380
7381 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7382
7383 /* disable both bits, even after read */
7384 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7385 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7386 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7387}
7388
7389static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7390 u32 cmd_flags)
7391{
7392 int rc;
7393 int count, i;
7394 u32 val;
7395
7396 /* build the command word */
7397 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7398
7399 /* need to clear DONE bit separately */
7400 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7401
7402 /* address of the NVRAM to read from */
7403 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7404 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7405
7406 /* issue a read command */
7407 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7408
7409 /* adjust timeout for emulation/FPGA */
7410 count = NVRAM_TIMEOUT_COUNT;
7411 if (CHIP_REV_IS_SLOW(bp))
7412 count *= 100;
7413
7414 /* wait for completion */
7415 *ret_val = 0;
7416 rc = -EBUSY;
7417 for (i = 0; i < count; i++) {
7418 udelay(5);
7419 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7420
7421 if (val & MCPR_NVM_COMMAND_DONE) {
7422 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7423 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7424			/* we read nvram data in cpu order,
7425			 * but ethtool sees it as an array of bytes;
7426			 * converting to big-endian does the work */
7427 val = cpu_to_be32(val);
7428 *ret_val = val;
7429 rc = 0;
7430 break;
7431 }
7432 }
7433
7434 return rc;
7435}
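/* Editorial example (assuming a little-endian host): if val arrives as
 * the CPU-order integer 0x12345678, cpu_to_be32() turns it into the
 * in-memory byte sequence 12 34 56 78, so the memcpy() in
 * bnx2x_nvram_read() hands ethtool the bytes in flash order.
 */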
7436
7437static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7438 int buf_size)
7439{
7440 int rc;
7441 u32 cmd_flags;
7442 u32 val;
7443
7444 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7445 DP(NETIF_MSG_NVM,
c14423fe 7446 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7447 offset, buf_size);
7448 return -EINVAL;
7449 }
7450
7451 if (offset + buf_size > bp->flash_size) {
7452		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7453 " buf_size (0x%x) > flash_size (0x%x)\n",
7454 offset, buf_size, bp->flash_size);
7455 return -EINVAL;
7456 }
7457
7458 /* request access to nvram interface */
7459 rc = bnx2x_acquire_nvram_lock(bp);
7460 if (rc)
7461 return rc;
7462
7463 /* enable access to nvram interface */
7464 bnx2x_enable_nvram_access(bp);
7465
7466 /* read the first word(s) */
7467 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7468 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7469 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7470 memcpy(ret_buf, &val, 4);
7471
7472 /* advance to the next dword */
7473 offset += sizeof(u32);
7474 ret_buf += sizeof(u32);
7475 buf_size -= sizeof(u32);
7476 cmd_flags = 0;
7477 }
7478
7479 if (rc == 0) {
7480 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7481 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7482 memcpy(ret_buf, &val, 4);
7483 }
7484
7485 /* disable access to nvram interface */
7486 bnx2x_disable_nvram_access(bp);
7487 bnx2x_release_nvram_lock(bp);
7488
7489 return rc;
7490}
7491
7492static int bnx2x_get_eeprom(struct net_device *dev,
7493 struct ethtool_eeprom *eeprom, u8 *eebuf)
7494{
7495 struct bnx2x *bp = netdev_priv(dev);
7496 int rc;
7497
7498 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7499 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7500 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7501 eeprom->len, eeprom->len);
7502
7503 /* parameters already validated in ethtool_get_eeprom */
7504
7505 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7506
7507 return rc;
7508}
7509
7510static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7511 u32 cmd_flags)
7512{
7513 int rc;
7514 int count, i;
7515
7516 /* build the command word */
7517 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7518
7519 /* need to clear DONE bit separately */
7520 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7521
7522 /* write the data */
7523 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7524
7525 /* address of the NVRAM to write to */
7526 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7527 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7528
7529 /* issue the write command */
7530 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7531
7532 /* adjust timeout for emulation/FPGA */
7533 count = NVRAM_TIMEOUT_COUNT;
7534 if (CHIP_REV_IS_SLOW(bp))
7535 count *= 100;
7536
7537 /* wait for completion */
7538 rc = -EBUSY;
7539 for (i = 0; i < count; i++) {
7540 udelay(5);
7541 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7542 if (val & MCPR_NVM_COMMAND_DONE) {
7543 rc = 0;
7544 break;
7545 }
7546 }
7547
7548 return rc;
7549}
7550
7551#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
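/* Editorial example: BYTE_OFFSET() gives the bit position of a byte
 * within its naturally aligned dword, e.g. BYTE_OFFSET(0x102) == 16,
 * so bnx2x_nvram_write1() below masks and ORs the single byte into
 * bits 23:16 of the dword read back from offset 0x100.
 */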
7552
7553static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7554 int buf_size)
7555{
7556 int rc;
7557 u32 cmd_flags;
7558 u32 align_offset;
7559 u32 val;
7560
7561 if (offset + buf_size > bp->flash_size) {
7562		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7563 " buf_size (0x%x) > flash_size (0x%x)\n",
7564 offset, buf_size, bp->flash_size);
7565 return -EINVAL;
7566 }
7567
7568 /* request access to nvram interface */
7569 rc = bnx2x_acquire_nvram_lock(bp);
7570 if (rc)
7571 return rc;
7572
7573 /* enable access to nvram interface */
7574 bnx2x_enable_nvram_access(bp);
7575
7576 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
7577 align_offset = (offset & ~0x03);
7578 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
7579
7580 if (rc == 0) {
7581 val &= ~(0xff << BYTE_OFFSET(offset));
7582 val |= (*data_buf << BYTE_OFFSET(offset));
7583
7584 		/* nvram data is returned as an array of bytes;
7585 		 * convert it back to cpu order */
7586 val = be32_to_cpu(val);
7587
7588 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7589
7590 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
7591 cmd_flags);
7592 }
7593
7594 /* disable access to nvram interface */
7595 bnx2x_disable_nvram_access(bp);
7596 bnx2x_release_nvram_lock(bp);
7597
7598 return rc;
7599}
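
/* Illustration (a sketch, not part of the driver; the helper name is
 * invented): the single-byte write above is a read-modify-write on the
 * aligned dword.  Writing 0xAB at offset 0x102 reads the dword at 0x100
 * and, since BYTE_OFFSET(0x102) == 16, merges the byte into bits 23:16.
 */
static inline u32 nvram_merge_byte_example(u32 dword, u32 offset, u8 byte)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));	/* clear the target byte */
	dword |= ((u32)byte << BYTE_OFFSET(offset));	/* insert the new byte */
	return dword;
}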
7600
7601static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7602 int buf_size)
7603{
7604 int rc;
7605 u32 cmd_flags;
7606 u32 val;
7607 u32 written_so_far;
7608
7609 if (buf_size == 1) { /* ethtool */
7610 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
7611 }
7612
7613 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7614 DP(NETIF_MSG_NVM,
7615 		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7616 offset, buf_size);
7617 return -EINVAL;
7618 }
7619
7620 if (offset + buf_size > bp->flash_size) {
7621 		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7622 " buf_size (0x%x) > flash_size (0x%x)\n",
7623 offset, buf_size, bp->flash_size);
7624 return -EINVAL;
7625 }
7626
7627 /* request access to nvram interface */
7628 rc = bnx2x_acquire_nvram_lock(bp);
7629 if (rc)
7630 return rc;
7631
7632 /* enable access to nvram interface */
7633 bnx2x_enable_nvram_access(bp);
7634
7635 written_so_far = 0;
7636 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7637 while ((written_so_far < buf_size) && (rc == 0)) {
7638 if (written_so_far == (buf_size - sizeof(u32)))
7639 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7640 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
7641 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7642 else if ((offset % NVRAM_PAGE_SIZE) == 0)
7643 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
7644
7645 memcpy(&val, data_buf, 4);
7646 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7647
7648 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
7649
7650 /* advance to the next dword */
7651 offset += sizeof(u32);
7652 data_buf += sizeof(u32);
7653 written_so_far += sizeof(u32);
7654 cmd_flags = 0;
7655 }
7656
7657 /* disable access to nvram interface */
7658 bnx2x_disable_nvram_access(bp);
7659 bnx2x_release_nvram_lock(bp);
7660
7661 return rc;
7662}
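
/* Usage sketch (values are examples): dword-aligned writes arrive here
 * via ethtool -E, e.g.
 *	ethtool -E eth0 magic <magic> offset 0x100 length 8
 * offset and length must be multiples of 4; the only unaligned case
 * supported is the single-byte write routed to bnx2x_nvram_write1().
 */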
7663
7664static int bnx2x_set_eeprom(struct net_device *dev,
7665 struct ethtool_eeprom *eeprom, u8 *eebuf)
7666{
7667 struct bnx2x *bp = netdev_priv(dev);
7668 int rc;
7669
7670 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7671 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7672 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7673 eeprom->len, eeprom->len);
7674
7675 /* parameters already validated in ethtool_set_eeprom */
7676
7677 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7678
7679 return rc;
7680}
7681
7682static int bnx2x_get_coalesce(struct net_device *dev,
7683 struct ethtool_coalesce *coal)
7684{
7685 struct bnx2x *bp = netdev_priv(dev);
7686
7687 memset(coal, 0, sizeof(struct ethtool_coalesce));
7688
7689 coal->rx_coalesce_usecs = bp->rx_ticks;
7690 coal->tx_coalesce_usecs = bp->tx_ticks;
7691 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7692
7693 return 0;
7694}
7695
7696static int bnx2x_set_coalesce(struct net_device *dev,
7697 struct ethtool_coalesce *coal)
7698{
7699 struct bnx2x *bp = netdev_priv(dev);
7700
7701 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7702 if (bp->rx_ticks > 3000)
7703 bp->rx_ticks = 3000;
7704
7705 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7706 if (bp->tx_ticks > 0x3000)
7707 bp->tx_ticks = 0x3000;
7708
7709 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7710 if (bp->stats_ticks > 0xffff00)
7711 bp->stats_ticks = 0xffff00;
7712 bp->stats_ticks &= 0xffff00;
7713
7714 if (netif_running(bp->dev))
7715 bnx2x_update_coalesce(bp);
7716
7717 return 0;
7718}
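
/* Usage sketch (example values):
 *	ethtool -C eth0 rx-usecs 25 tx-usecs 50
 * Requests beyond the clamps above (3000 for rx, 0x3000 for tx and
 * 0xffff00 for the stats block) are silently capped, not rejected.
 */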
7719
7720static void bnx2x_get_ringparam(struct net_device *dev,
7721 struct ethtool_ringparam *ering)
7722{
7723 struct bnx2x *bp = netdev_priv(dev);
7724
7725 ering->rx_max_pending = MAX_RX_AVAIL;
7726 ering->rx_mini_max_pending = 0;
7727 ering->rx_jumbo_max_pending = 0;
7728
7729 ering->rx_pending = bp->rx_ring_size;
7730 ering->rx_mini_pending = 0;
7731 ering->rx_jumbo_pending = 0;
7732
7733 ering->tx_max_pending = MAX_TX_AVAIL;
7734 ering->tx_pending = bp->tx_ring_size;
7735}
7736
7737static int bnx2x_set_ringparam(struct net_device *dev,
7738 struct ethtool_ringparam *ering)
7739{
7740 struct bnx2x *bp = netdev_priv(dev);
7741
7742 if ((ering->rx_pending > MAX_RX_AVAIL) ||
7743 (ering->tx_pending > MAX_TX_AVAIL) ||
7744 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
7745 return -EINVAL;
7746
7747 bp->rx_ring_size = ering->rx_pending;
7748 bp->tx_ring_size = ering->tx_pending;
7749
7750 if (netif_running(bp->dev)) {
7751 bnx2x_nic_unload(bp, 0);
7752 bnx2x_nic_load(bp, 0);
7753 }
7754
7755 return 0;
7756}
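
/* Usage sketch (example values):
 *	ethtool -G eth0 rx 2048 tx 1024
 * tx must stay above MAX_SKB_FRAGS + 4 so a maximally fragmented skb
 * always fits; a running NIC is reloaded for the change to take effect.
 */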
7757
7758static void bnx2x_get_pauseparam(struct net_device *dev,
7759 struct ethtool_pauseparam *epause)
7760{
7761 struct bnx2x *bp = netdev_priv(dev);
7762
7763 epause->autoneg =
7764 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
7765 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
7766 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
7767
7768 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7769 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7770 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7771}
7772
7773static int bnx2x_set_pauseparam(struct net_device *dev,
7774 struct ethtool_pauseparam *epause)
7775{
7776 struct bnx2x *bp = netdev_priv(dev);
7777
7778 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7779 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7780 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7781
7782 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7783 if (epause->autoneg) {
7784 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
7785 if (bp->dev->mtu <= 4500) {
7786 bp->pause_mode = PAUSE_BOTH;
7787 bp->advertising |= (ADVERTISED_Pause |
7788 ADVERTISED_Asym_Pause);
7789 } else {
7790 bp->pause_mode = PAUSE_ASYMMETRIC;
7791 bp->advertising |= ADVERTISED_Asym_Pause;
7792 }
7793
7794 } else {
7795 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
7796
7797 if (epause->rx_pause)
7798 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7799 if (epause->tx_pause)
7800 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7801
7802 switch (bp->req_flow_ctrl) {
7803 case FLOW_CTRL_AUTO:
7804 bp->req_flow_ctrl = FLOW_CTRL_NONE;
7805 bp->pause_mode = PAUSE_NONE;
7806 bp->advertising &= ~(ADVERTISED_Pause |
7807 ADVERTISED_Asym_Pause);
7808 break;
7809
7810 case FLOW_CTRL_TX:
7811 bp->pause_mode = PAUSE_ASYMMETRIC;
7812 bp->advertising |= ADVERTISED_Asym_Pause;
7813 break;
7814
7815 case FLOW_CTRL_RX:
7816 case FLOW_CTRL_BOTH:
7817 bp->pause_mode = PAUSE_BOTH;
7818 bp->advertising |= (ADVERTISED_Pause |
7819 ADVERTISED_Asym_Pause);
7820 break;
7821 }
7822 }
7823
7824 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n"
7825 DP_LEVEL " pause_mode %d advertising 0x%x\n",
7826 bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
7827 bp->advertising);
7828
7829 bnx2x_stop_stats(bp);
7830 bnx2x_link_initialize(bp);
7831
7832 return 0;
7833}
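
/* Usage sketch (example values):
 *	ethtool -A eth0 autoneg off rx on tx off
 * maps to req_flow_ctrl == FLOW_CTRL_RX and therefore PAUSE_BOTH in the
 * switch above, while autoneg on advertises pause based on the current MTU.
 */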
7834
7835static u32 bnx2x_get_rx_csum(struct net_device *dev)
7836{
7837 struct bnx2x *bp = netdev_priv(dev);
7838
7839 return bp->rx_csum;
7840}
7841
7842static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
7843{
7844 struct bnx2x *bp = netdev_priv(dev);
7845
7846 bp->rx_csum = data;
7847 return 0;
7848}
7849
7850static int bnx2x_set_tso(struct net_device *dev, u32 data)
7851{
7852 if (data)
7853 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7854 else
7855 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
7856 return 0;
7857}
7858
7859static struct {
7860 char string[ETH_GSTRING_LEN];
7861} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
7862 { "MC Errors (online)" }
7863};
7864
7865static int bnx2x_self_test_count(struct net_device *dev)
7866{
7867 return BNX2X_NUM_TESTS;
7868}
7869
7870static void bnx2x_self_test(struct net_device *dev,
7871 struct ethtool_test *etest, u64 *buf)
7872{
7873 struct bnx2x *bp = netdev_priv(dev);
7874 int stats_state;
7875
7876 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
7877
7878 if (bp->state != BNX2X_STATE_OPEN) {
7879 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7880 return;
7881 }
7882
7883 stats_state = bp->stats_state;
7884 bnx2x_stop_stats(bp);
7885
7886 if (bnx2x_mc_assert(bp) != 0) {
7887 buf[0] = 1;
7888 etest->flags |= ETH_TEST_FL_FAILED;
7889 }
7890
7891#ifdef BNX2X_EXTRA_DEBUG
7892 bnx2x_panic_dump(bp);
7893#endif
7894 bp->stats_state = stats_state;
7895}
7896
7897static struct {
7898 char string[ETH_GSTRING_LEN];
7899} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
7900 { "rx_bytes"}, /* 0 */
7901 { "rx_error_bytes"}, /* 1 */
7902 { "tx_bytes"}, /* 2 */
7903 { "tx_error_bytes"}, /* 3 */
7904 { "rx_ucast_packets"}, /* 4 */
7905 { "rx_mcast_packets"}, /* 5 */
7906 { "rx_bcast_packets"}, /* 6 */
7907 { "tx_ucast_packets"}, /* 7 */
7908 { "tx_mcast_packets"}, /* 8 */
7909 { "tx_bcast_packets"}, /* 9 */
7910 { "tx_mac_errors"}, /* 10 */
7911 { "tx_carrier_errors"}, /* 11 */
7912 { "rx_crc_errors"}, /* 12 */
7913 { "rx_align_errors"}, /* 13 */
7914 { "tx_single_collisions"}, /* 14 */
7915 { "tx_multi_collisions"}, /* 15 */
7916 { "tx_deferred"}, /* 16 */
7917 { "tx_excess_collisions"}, /* 17 */
7918 { "tx_late_collisions"}, /* 18 */
7919 { "tx_total_collisions"}, /* 19 */
7920 { "rx_fragments"}, /* 20 */
7921 { "rx_jabbers"}, /* 21 */
7922 { "rx_undersize_packets"}, /* 22 */
7923 { "rx_oversize_packets"}, /* 23 */
7924 { "rx_xon_frames"}, /* 24 */
7925 { "rx_xoff_frames"}, /* 25 */
7926 { "tx_xon_frames"}, /* 26 */
7927 { "tx_xoff_frames"}, /* 27 */
7928 { "rx_mac_ctrl_frames"}, /* 28 */
7929 { "rx_filtered_packets"}, /* 29 */
7930 { "rx_discards"}, /* 30 */
7931};
7932
7933#define STATS_OFFSET32(offset_name) \
7934 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
7935
7936static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
7937 STATS_OFFSET32(total_bytes_received_hi), /* 0 */
7938 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */
7939 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */
7940 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */
7941 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */
7942 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */
7943 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */
7944 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */
7945 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */
7946 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */
7947 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
7948 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */
7949 STATS_OFFSET32(crc_receive_errors), /* 12 */
7950 STATS_OFFSET32(alignment_errors), /* 13 */
7951 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */
7952 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */
7953 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */
7954 STATS_OFFSET32(excessive_collision_frames), /* 17 */
7955 STATS_OFFSET32(late_collision_frames), /* 18 */
7956 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */
7957 STATS_OFFSET32(runt_packets_received), /* 20 */
7958 STATS_OFFSET32(jabber_packets_received), /* 21 */
7959 STATS_OFFSET32(error_runt_packets_received), /* 22 */
7960 STATS_OFFSET32(error_jabber_packets_received), /* 23 */
7961 STATS_OFFSET32(pause_xon_frames_received), /* 24 */
7962 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */
7963 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */
7964 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */
7965 STATS_OFFSET32(control_frames_received), /* 28 */
7966 STATS_OFFSET32(mac_filter_discard), /* 29 */
7967 STATS_OFFSET32(no_buff_discard), /* 30 */
7968};
7969
7970static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
7971 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
7972 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
7973 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
7974 4,
7975};
7976
7977static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7978{
7979 switch (stringset) {
7980 case ETH_SS_STATS:
7981 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
7982 break;
7983
7984 case ETH_SS_TEST:
7985 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
7986 break;
7987 }
7988}
7989
7990static int bnx2x_get_stats_count(struct net_device *dev)
7991{
7992 return BNX2X_NUM_STATS;
7993}
7994
7995static void bnx2x_get_ethtool_stats(struct net_device *dev,
7996 struct ethtool_stats *stats, u64 *buf)
7997{
7998 struct bnx2x *bp = netdev_priv(dev);
7999 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8000 int i;
8001
8002 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8003 if (bnx2x_stats_len_arr[i] == 0) {
8004 /* skip this counter */
8005 buf[i] = 0;
8006 continue;
8007 }
8008 if (!hw_stats) {
8009 buf[i] = 0;
8010 continue;
8011 }
8012 if (bnx2x_stats_len_arr[i] == 4) {
8013 /* 4-byte counter */
8014 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8015 continue;
8016 }
8017 /* 8-byte counter */
8018 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8019 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8020 }
8021}
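
/* Note (descriptive, added): 8-byte counters are stored as hi/lo pairs of
 * 32-bit words; HILO_U64(hi, lo) recombines them, conceptually
 * (((u64)hi << 32) | lo), see the macro definition in bnx2x.h.  Entries
 * with length 0 in bnx2x_stats_len_arr are placeholders reported as 0.
 */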
8022
8023static int bnx2x_phys_id(struct net_device *dev, u32 data)
8024{
8025 struct bnx2x *bp = netdev_priv(dev);
8026 int i;
8027
8028 if (data == 0)
8029 data = 2;
8030
8031 for (i = 0; i < (data * 2); i++) {
8032 if ((i % 2) == 0) {
8033 bnx2x_leds_set(bp, SPEED_1000);
8034 } else {
8035 bnx2x_leds_unset(bp);
8036 }
8037 msleep_interruptible(500);
8038 if (signal_pending(current))
8039 break;
8040 }
8041
8042 if (bp->link_up)
8043 bnx2x_leds_set(bp, bp->line_speed);
8044
8045 return 0;
8046}
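
/* Usage sketch: ethtool -p eth0 5 blinks the port LEDs for about five
 * seconds; each iteration toggles the LEDs and sleeps 500 ms, so data
 * seconds costs data * 2 iterations, then the link LED state is restored.
 */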
8047
8048static struct ethtool_ops bnx2x_ethtool_ops = {
8049 .get_settings = bnx2x_get_settings,
8050 .set_settings = bnx2x_set_settings,
8051 .get_drvinfo = bnx2x_get_drvinfo,
8052 .get_wol = bnx2x_get_wol,
8053 .set_wol = bnx2x_set_wol,
8054 .get_msglevel = bnx2x_get_msglevel,
8055 .set_msglevel = bnx2x_set_msglevel,
8056 .nway_reset = bnx2x_nway_reset,
8057 .get_link = ethtool_op_get_link,
8058 .get_eeprom_len = bnx2x_get_eeprom_len,
8059 .get_eeprom = bnx2x_get_eeprom,
8060 .set_eeprom = bnx2x_set_eeprom,
8061 .get_coalesce = bnx2x_get_coalesce,
8062 .set_coalesce = bnx2x_set_coalesce,
8063 .get_ringparam = bnx2x_get_ringparam,
8064 .set_ringparam = bnx2x_set_ringparam,
8065 .get_pauseparam = bnx2x_get_pauseparam,
8066 .set_pauseparam = bnx2x_set_pauseparam,
8067 .get_rx_csum = bnx2x_get_rx_csum,
8068 .set_rx_csum = bnx2x_set_rx_csum,
8069 .get_tx_csum = ethtool_op_get_tx_csum,
8070 .set_tx_csum = ethtool_op_set_tx_csum,
8071 .get_sg = ethtool_op_get_sg,
8072 .set_sg = ethtool_op_set_sg,
8073 .get_tso = ethtool_op_get_tso,
8074 .set_tso = bnx2x_set_tso,
8075 .self_test_count = bnx2x_self_test_count,
8076 .self_test = bnx2x_self_test,
8077 .get_strings = bnx2x_get_strings,
8078 .phys_id = bnx2x_phys_id,
8079 .get_stats_count = bnx2x_get_stats_count,
8080 .get_ethtool_stats = bnx2x_get_ethtool_stats
8081};
8082
8083/* end of ethtool_ops */
8084
8085/****************************************************************************
8086* General service functions
8087****************************************************************************/
8088
8089static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8090{
8091 u16 pmcsr;
8092
8093 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
8094
8095 switch (state) {
8096 case PCI_D0:
8097 pci_write_config_word(bp->pdev,
8098 bp->pm_cap + PCI_PM_CTRL,
8099 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
8100 PCI_PM_CTRL_PME_STATUS));
8101
8102 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
8103 /* delay required during transition out of D3hot */
8104 msleep(20);
8105 break;
8106
8107 case PCI_D3hot:
8108 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
8109 pmcsr |= 3;
8110
8111 if (bp->wol)
8112 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
8113
8114 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
8115 pmcsr);
8116
8117 /* No more memory access after this point until
8118 * device is brought back to D0.
8119 */
8120 break;
8121
8122 default:
8123 return -EINVAL;
8124 }
8125 return 0;
8126}
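
/* Note (descriptive, added): per the PCI PM spec the low two PMCSR bits
 * select the power state (0 = D0, 3 = D3hot), hence the "pmcsr |= 3"
 * in the D3hot arm; PME generation is armed only when WoL is enabled.
 */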
8127
8128/*
8129 * net_device service functions
8130 */
8131
8132/* Called with rtnl_lock from vlan functions and also netif_tx_lock
8133 * from set_multicast.
8134 */
8135static void bnx2x_set_rx_mode(struct net_device *dev)
8136{
8137 struct bnx2x *bp = netdev_priv(dev);
8138 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8139
8140 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
8141
8142 if (dev->flags & IFF_PROMISC)
8143 rx_mode = BNX2X_RX_MODE_PROMISC;
8144
8145 else if ((dev->flags & IFF_ALLMULTI) ||
8146 (dev->mc_count > BNX2X_MAX_MULTICAST))
8147 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8148
8149 else { /* some multicasts */
8150 int i, old, offset;
8151 struct dev_mc_list *mclist;
8152 struct mac_configuration_cmd *config =
8153 bnx2x_sp(bp, mcast_config);
8154
8155 for (i = 0, mclist = dev->mc_list;
8156 mclist && (i < dev->mc_count);
8157 i++, mclist = mclist->next) {
8158
8159 config->config_table[i].cam_entry.msb_mac_addr =
8160 swab16(*(u16 *)&mclist->dmi_addr[0]);
8161 config->config_table[i].cam_entry.middle_mac_addr =
8162 swab16(*(u16 *)&mclist->dmi_addr[2]);
8163 config->config_table[i].cam_entry.lsb_mac_addr =
8164 swab16(*(u16 *)&mclist->dmi_addr[4]);
8165 config->config_table[i].cam_entry.flags =
8166 cpu_to_le16(bp->port);
8167 config->config_table[i].target_table_entry.flags = 0;
8168 config->config_table[i].target_table_entry.
8169 client_id = 0;
8170 config->config_table[i].target_table_entry.
8171 vlan_id = 0;
8172
8173 DP(NETIF_MSG_IFUP,
8174 "setting MCAST[%d] (%04x:%04x:%04x)\n",
8175 i, config->config_table[i].cam_entry.msb_mac_addr,
8176 config->config_table[i].cam_entry.middle_mac_addr,
8177 config->config_table[i].cam_entry.lsb_mac_addr);
8178 }
8179 old = config->hdr.length_6b;
8180 if (old > i) {
8181 for (; i < old; i++) {
8182 if (CAM_IS_INVALID(config->config_table[i])) {
8183 i--; /* already invalidated */
8184 break;
8185 }
8186 /* invalidate */
8187 CAM_INVALIDATE(config->config_table[i]);
8188 }
8189 }
8190
8191 if (CHIP_REV_IS_SLOW(bp))
8192 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
8193 else
8194 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
8195
8196 config->hdr.length_6b = i;
8197 config->hdr.offset = offset;
8198 config->hdr.reserved0 = 0;
8199 config->hdr.reserved1 = 0;
8200
8201 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8202 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8203 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8204 }
8205
8206 bp->rx_mode = rx_mode;
8207 bnx2x_set_storm_rx_mode(bp);
8208}
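
/* Illustration (little-endian host assumed): the config above packs each
 * MAC as three swab16'd 16-bit loads.  For 00:11:22:33:44:55 the u16 load
 * of the first pair is 0x1100, so msb_mac_addr = swab16(0x1100) = 0x0011,
 * middle = 0x2233 and lsb = 0x4455.
 */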
8209
8210static int bnx2x_poll(struct napi_struct *napi, int budget)
8211{
8212 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
8213 napi);
8214 struct bnx2x *bp = fp->bp;
8215 int work_done = 0;
8216
8217#ifdef BNX2X_STOP_ON_ERROR
8218 if (unlikely(bp->panic))
8219 goto out_panic;
8220#endif
8221
8222 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
8223 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
8224 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
8225
8226 bnx2x_update_fpsb_idx(fp);
8227
8228 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
8229 bnx2x_tx_int(fp, budget);
8230
8231
8232 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
8233 work_done = bnx2x_rx_int(fp, budget);
8234
8235
8236 rmb(); /* bnx2x_has_work() reads the status block */
8237
8238 /* must not complete if we consumed full budget */
8239 if ((work_done < budget) && !bnx2x_has_work(fp)) {
8240
8241#ifdef BNX2X_STOP_ON_ERROR
8242out_panic:
8243#endif
8244 netif_rx_complete(bp->dev, napi);
8245
8246 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
8247 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
8248 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
8249 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
8250 }
8251
8252 return work_done;
8253}
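
/* Note (descriptive, added): this follows the usual NAPI contract; the
 * poll must not call netif_rx_complete() after consuming the full budget,
 * so interrupts are re-enabled (the IGU_INT_ENABLE ack) only on the
 * early-completion path above.
 */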
8254
8255/* Called with netif_tx_lock.
8256 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
8257 * netif_wake_queue().
8258 */
8259static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8260{
8261 struct bnx2x *bp = netdev_priv(dev);
8262 struct bnx2x_fastpath *fp;
8263 struct sw_tx_bd *tx_buf;
8264 struct eth_tx_bd *tx_bd;
8265 struct eth_tx_parse_bd *pbd = NULL;
8266 u16 pkt_prod, bd_prod;
8267 int nbd, fp_index = 0;
8268 dma_addr_t mapping;
8269
8270#ifdef BNX2X_STOP_ON_ERROR
8271 if (unlikely(bp->panic))
8272 return NETDEV_TX_BUSY;
8273#endif
8274
8275 fp_index = smp_processor_id() % (bp->num_queues);
8276
8277 fp = &bp->fp[fp_index];
8278 	if (unlikely(bnx2x_tx_avail(fp) <
8279 				  (skb_shinfo(skb)->nr_frags + 3))) {
8280 		bp->slowpath->eth_stats.driver_xoff++;
8281 		netif_stop_queue(dev);
8282 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
8283 return NETDEV_TX_BUSY;
8284 }
8285
8286 /*
8287 This is a bit ugly. First we use one BD which we mark as start,
8288 then for TSO or xsum we have a parsing info BD,
8289 and only then we have the rest of the TSO bds.
8290 (don't forget to mark the last one as last,
8291 and to unmap only AFTER you write to the BD ...)
8292 I would like to thank DovH for this mess.
8293 */
8294
8295 pkt_prod = fp->tx_pkt_prod++;
8296 bd_prod = fp->tx_bd_prod;
8297 bd_prod = TX_BD(bd_prod);
8298
8299 /* get a tx_buff and first bd */
8300 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8301 tx_bd = &fp->tx_desc_ring[bd_prod];
8302
8303 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
8304 tx_bd->general_data = (UNICAST_ADDRESS <<
8305 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
8306 tx_bd->general_data |= 1; /* header nbd */
8307
8308 	/* remember the first bd of the packet */
8309 tx_buf->first_bd = bd_prod;
8310
8311 DP(NETIF_MSG_TX_QUEUED,
8312 "sending pkt %u @%p next_idx %u bd %u @%p\n",
8313 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
8314
8315 if (skb->ip_summed == CHECKSUM_PARTIAL) {
8316 struct iphdr *iph = ip_hdr(skb);
8317 u8 len;
8318
8319 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
8320
8321 /* turn on parsing and get a bd */
8322 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8323 pbd = (void *)&fp->tx_desc_ring[bd_prod];
8324 len = ((u8 *)iph - (u8 *)skb->data) / 2;
8325
8326 /* for now NS flag is not used in Linux */
8327 pbd->global_data = (len |
8328 				    ((skb->protocol == htons(ETH_P_8021Q)) <<
8329 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
8330 pbd->ip_hlen = ip_hdrlen(skb) / 2;
8331 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
8332 if (iph->protocol == IPPROTO_TCP) {
8333 struct tcphdr *th = tcp_hdr(skb);
8334
8335 tx_bd->bd_flags.as_bitfield |=
8336 ETH_TX_BD_FLAGS_TCP_CSUM;
8337 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
8338 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
8339 pbd->tcp_pseudo_csum = swab16(th->check);
8340
8341 } else if (iph->protocol == IPPROTO_UDP) {
8342 struct udphdr *uh = udp_hdr(skb);
8343
8344 tx_bd->bd_flags.as_bitfield |=
8345 ETH_TX_BD_FLAGS_TCP_CSUM;
8346 pbd->total_hlen += cpu_to_le16(4);
8347 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
8348 pbd->cs_offset = 5; /* 10 >> 1 */
8349 pbd->tcp_pseudo_csum = 0;
8350 /* HW bug: we need to subtract 10 bytes before the
8351 * UDP header from the csum
8352 */
8353 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
8354 csum_partial(((u8 *)(uh)-10), 10, 0)));
8355 }
8356 }
8357
8358 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
8359 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
8360 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
8361 } else {
8362 tx_bd->vlan = cpu_to_le16(pkt_prod);
8363 }
8364
8365 mapping = pci_map_single(bp->pdev, skb->data,
8366 skb->len, PCI_DMA_TODEVICE);
8367
8368 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8369 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8370 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
8371 tx_bd->nbd = cpu_to_le16(nbd);
8372 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8373
8374 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
8375 " nbytes %d flags %x vlan %u\n",
8376 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
8377 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
8378
8379 if (skb_shinfo(skb)->gso_size &&
8380 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
8381 		int hlen = 2 * le16_to_cpu(pbd->total_hlen);
8382
8383 DP(NETIF_MSG_TX_QUEUED,
8384 "TSO packet len %d hlen %d total len %d tso size %d\n",
8385 skb->len, hlen, skb_headlen(skb),
8386 skb_shinfo(skb)->gso_size);
8387
8388 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
8389
8390 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
8391 /* we split the first bd into headers and data bds
8392 			 * to ease the pain of our fellow microcode engineers
8393 * we use one mapping for both bds
8394 * So far this has only been observed to happen
8395 * in Other Operating Systems(TM)
8396 */
8397
8398 /* first fix first bd */
8399 nbd++;
8400 tx_bd->nbd = cpu_to_le16(nbd);
8401 tx_bd->nbytes = cpu_to_le16(hlen);
8402
8403 /* we only print this as an error
8404 * because we don't think this will ever happen.
8405 */
8406 BNX2X_ERR("TSO split header size is %d (%x:%x)"
8407 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
8408 tx_bd->addr_lo, tx_bd->nbd);
8409
8410 /* now get a new data bd
8411 * (after the pbd) and fill it */
8412 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8413 tx_bd = &fp->tx_desc_ring[bd_prod];
8414
8415 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8416 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
8417 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
8418 tx_bd->vlan = cpu_to_le16(pkt_prod);
8419 /* this marks the bd
8420 * as one that has no individual mapping
8421 			 * the FW ignores this flag in a bd not marked start
8422 */
8423 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
8424 DP(NETIF_MSG_TX_QUEUED,
8425 "TSO split data size is %d (%x:%x)\n",
8426 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
8427 }
8428
8429 if (!pbd) {
8430 /* supposed to be unreached
8431 * (and therefore not handled properly...)
8432 */
8433 BNX2X_ERR("LSO with no PBD\n");
8434 BUG();
8435 }
8436
8437 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
8438 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
8439 pbd->ip_id = swab16(ip_hdr(skb)->id);
8440 pbd->tcp_pseudo_csum =
8441 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
8442 ip_hdr(skb)->daddr,
8443 0, IPPROTO_TCP, 0));
8444 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
8445 }
8446
8447 {
8448 int i;
8449
8450 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
8451 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8452
8453 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8454 tx_bd = &fp->tx_desc_ring[bd_prod];
8455
8456 mapping = pci_map_page(bp->pdev, frag->page,
8457 frag->page_offset,
8458 frag->size, PCI_DMA_TODEVICE);
8459
8460 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8461 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8462 tx_bd->nbytes = cpu_to_le16(frag->size);
8463 tx_bd->vlan = cpu_to_le16(pkt_prod);
8464 tx_bd->bd_flags.as_bitfield = 0;
8465 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
8466 " addr (%x:%x) nbytes %d flags %x\n",
8467 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
8468 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
8469 } /* for */
8470 }
8471
8472 /* now at last mark the bd as the last bd */
8473 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
8474
8475 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
8476 tx_bd, tx_bd->bd_flags.as_bitfield);
8477
8478 tx_buf->skb = skb;
8479
8480 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8481
8482 /* now send a tx doorbell, counting the next bd
8483 * if the packet contains or ends with it
8484 */
8485 if (TX_BD_POFF(bd_prod) < nbd)
8486 nbd++;
8487
8488 if (pbd)
8489 DP(NETIF_MSG_TX_QUEUED,
8490 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
8491 " tcp_flags %x xsum %x seq %u hlen %u\n",
8492 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
8493 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
8494 pbd->tcp_send_seq, pbd->total_hlen);
8495
8496 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
8497
8498 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
8499 mb(); /* FW restriction: must not reorder writing nbd and packets */
8500 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
8501 DOORBELL(bp, fp_index, 0);
8502
8503 mmiowb();
8504
8505 fp->tx_bd_prod = bd_prod;
8506 dev->trans_start = jiffies;
8507
8508 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
8509 netif_stop_queue(dev);
8510 bp->slowpath->eth_stats.driver_xoff++;
8511 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
8512 netif_wake_queue(dev);
8513 }
8514 fp->tx_pkt++;
8515
8516 return NETDEV_TX_OK;
8517}
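
/* Illustration (worked example): for a checksummed skb with 3 fragments
 * the ring gets one start BD, one parsing BD (pbd != NULL, the "+ 2"
 * case) and three fragment BDs, so nbd = 5; a TSO header split or the
 * TX_BD_POFF() condition above each add one more before the doorbell.
 */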
8518
8519static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
8520{
8521 return &dev->stats;
8522}
8523
8524/* Called with rtnl_lock */
8525static int bnx2x_open(struct net_device *dev)
8526{
8527 struct bnx2x *bp = netdev_priv(dev);
8528
8529 bnx2x_set_power_state(bp, PCI_D0);
8530
8531 return bnx2x_nic_load(bp, 1);
8532}
8533
8534/* Called with rtnl_lock */
8535static int bnx2x_close(struct net_device *dev)
8536{
8537 int rc;
8538 struct bnx2x *bp = netdev_priv(dev);
8539
8540 /* Unload the driver, release IRQs */
8541 rc = bnx2x_nic_unload(bp, 1);
8542 if (rc) {
8543 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
8544 return rc;
8545 }
8546 bnx2x_set_power_state(bp, PCI_D3hot);
8547
8548 return 0;
8549}
8550
8551/* Called with rtnl_lock */
8552static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
8553{
8554 struct sockaddr *addr = p;
8555 struct bnx2x *bp = netdev_priv(dev);
8556
8557 if (!is_valid_ether_addr(addr->sa_data))
8558 return -EINVAL;
8559
8560 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8561 if (netif_running(dev))
8562 bnx2x_set_mac_addr(bp);
8563
8564 return 0;
8565}
8566
8567/* Called with rtnl_lock */
8568static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8569{
8570 struct mii_ioctl_data *data = if_mii(ifr);
8571 struct bnx2x *bp = netdev_priv(dev);
8572 int err;
8573
8574 switch (cmd) {
8575 case SIOCGMIIPHY:
8576 data->phy_id = bp->phy_addr;
8577
8578 		/* fallthrough */
8579 case SIOCGMIIREG: {
8580 u32 mii_regval;
8581
8582 spin_lock_bh(&bp->phy_lock);
8583 if (bp->state == BNX2X_STATE_OPEN) {
8584 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
8585 &mii_regval);
8586
8587 data->val_out = mii_regval;
8588 } else {
8589 err = -EAGAIN;
8590 }
8591 spin_unlock_bh(&bp->phy_lock);
8592 return err;
8593 }
8594
8595 case SIOCSMIIREG:
8596 if (!capable(CAP_NET_ADMIN))
8597 return -EPERM;
8598
8599 spin_lock_bh(&bp->phy_lock);
8600 if (bp->state == BNX2X_STATE_OPEN) {
8601 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
8602 data->val_in);
8603 } else {
8604 err = -EAGAIN;
8605 }
8606 spin_unlock_bh(&bp->phy_lock);
8607 return err;
8608
8609 default:
8610 /* do nothing */
8611 break;
8612 }
8613
8614 return -EOPNOTSUPP;
8615}
8616
8617/* Called with rtnl_lock */
8618static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8619{
8620 struct bnx2x *bp = netdev_priv(dev);
8621
8622 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
8623 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
8624 return -EINVAL;
8625
8626 /* This does not race with packet allocation
8627 	 * because the actual alloc size is
8628 * only updated as part of load
8629 */
8630 dev->mtu = new_mtu;
8631
8632 if (netif_running(dev)) {
8633 bnx2x_nic_unload(bp, 0);
8634 bnx2x_nic_load(bp, 0);
8635 }
8636 return 0;
8637}
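
/* Usage sketch: ip link set eth0 mtu 9000 ends up here; when the
 * interface is up the new size takes effect through the unload/load
 * cycle above, since rx buffers are sized at load time.
 */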
8638
8639static void bnx2x_tx_timeout(struct net_device *dev)
8640{
8641 struct bnx2x *bp = netdev_priv(dev);
8642
8643#ifdef BNX2X_STOP_ON_ERROR
8644 if (!bp->panic)
8645 bnx2x_panic();
8646#endif
8647 /* This allows the netif to be shutdown gracefully before resetting */
8648 schedule_work(&bp->reset_task);
8649}
8650
8651#ifdef BCM_VLAN
8652/* Called with rtnl_lock */
8653static void bnx2x_vlan_rx_register(struct net_device *dev,
8654 struct vlan_group *vlgrp)
8655{
8656 struct bnx2x *bp = netdev_priv(dev);
8657
8658 bp->vlgrp = vlgrp;
8659 if (netif_running(dev))
8660 bnx2x_set_rx_mode(dev);
8661}
8662#endif
8663
8664#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8665static void poll_bnx2x(struct net_device *dev)
8666{
8667 struct bnx2x *bp = netdev_priv(dev);
8668
8669 disable_irq(bp->pdev->irq);
8670 bnx2x_interrupt(bp->pdev->irq, dev);
8671 enable_irq(bp->pdev->irq);
8672}
8673#endif
8674
8675static void bnx2x_reset_task(struct work_struct *work)
8676{
8677 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8678
8679#ifdef BNX2X_STOP_ON_ERROR
8680 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8681 " so reset not done to allow debug dump,\n"
8682 KERN_ERR " you will need to reboot when done\n");
8683 return;
8684#endif
8685
8686 if (!netif_running(bp->dev))
8687 return;
8688
8689 bp->in_reset_task = 1;
8690
8691 bnx2x_netif_stop(bp);
8692
8693 bnx2x_nic_unload(bp, 0);
8694 bnx2x_nic_load(bp, 0);
8695
8696 bp->in_reset_task = 0;
8697}
8698
8699static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8700 struct net_device *dev)
8701{
8702 struct bnx2x *bp;
8703 int rc;
8704
8705 SET_NETDEV_DEV(dev, &pdev->dev);
8706 bp = netdev_priv(dev);
8707
8708 bp->flags = 0;
8709 bp->port = PCI_FUNC(pdev->devfn);
8710
8711 rc = pci_enable_device(pdev);
8712 if (rc) {
8713 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
8714 goto err_out;
8715 }
8716
8717 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8718 printk(KERN_ERR PFX "Cannot find PCI device base address,"
8719 " aborting\n");
8720 rc = -ENODEV;
8721 goto err_out_disable;
8722 }
8723
8724 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8725 printk(KERN_ERR PFX "Cannot find second PCI device"
8726 " base address, aborting\n");
8727 rc = -ENODEV;
8728 goto err_out_disable;
8729 }
8730
8731 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8732 if (rc) {
8733 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
8734 " aborting\n");
8735 goto err_out_disable;
8736 }
8737
8738 pci_set_master(pdev);
8739
8740 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8741 if (bp->pm_cap == 0) {
8742 printk(KERN_ERR PFX "Cannot find power management"
8743 " capability, aborting\n");
8744 rc = -EIO;
8745 goto err_out_release;
8746 }
8747
8748 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8749 if (bp->pcie_cap == 0) {
8750 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
8751 " aborting\n");
8752 rc = -EIO;
8753 goto err_out_release;
8754 }
8755
8756 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
8757 bp->flags |= USING_DAC_FLAG;
8758 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
8759 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
8760 " failed, aborting\n");
8761 rc = -EIO;
8762 goto err_out_release;
8763 }
8764
8765 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
8766 printk(KERN_ERR PFX "System does not support DMA,"
8767 " aborting\n");
8768 rc = -EIO;
8769 goto err_out_release;
8770 }
8771
8772 bp->dev = dev;
8773 bp->pdev = pdev;
8774
8775 spin_lock_init(&bp->phy_lock);
8776
8777 bp->in_reset_task = 0;
8778
8779 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8780 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
8781
8782 	dev->base_addr = pci_resource_start(pdev, 0);
8783
8784 dev->irq = pdev->irq;
8785
8786 bp->regview = ioremap_nocache(dev->base_addr,
8787 pci_resource_len(pdev, 0));
8788 if (!bp->regview) {
8789 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
8790 rc = -ENOMEM;
8791 goto err_out_release;
8792 }
8793
8794 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
8795 pci_resource_len(pdev, 2));
8796 if (!bp->doorbells) {
8797 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
8798 rc = -ENOMEM;
8799 goto err_out_unmap;
8800 }
8801
8802 bnx2x_set_power_state(bp, PCI_D0);
8803
8804 bnx2x_get_hwinfo(bp);
8805
8806 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
8807 		printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
8808 " will only init first device\n");
8809 onefunc = 1;
8810 nomcp = 1;
8811 }
8812
8813 if (nomcp) {
8814 printk(KERN_ERR PFX "MCP disabled, will only"
8815 " init first device\n");
8816 onefunc = 1;
8817 }
8818
8819 if (onefunc && bp->port) {
8820 printk(KERN_ERR PFX "Second device disabled, exiting\n");
8821 rc = -ENODEV;
8822 goto err_out_unmap;
8823 }
8824
8825 bp->tx_ring_size = MAX_TX_AVAIL;
8826 bp->rx_ring_size = MAX_RX_AVAIL;
8827
8828 bp->rx_csum = 1;
8829
8830 bp->rx_offset = 0;
8831
8832 bp->tx_quick_cons_trip_int = 0xff;
8833 bp->tx_quick_cons_trip = 0xff;
8834 bp->tx_ticks_int = 50;
8835 bp->tx_ticks = 50;
8836
8837 bp->rx_quick_cons_trip_int = 0xff;
8838 bp->rx_quick_cons_trip = 0xff;
8839 bp->rx_ticks_int = 25;
8840 bp->rx_ticks = 25;
8841
8842 bp->stats_ticks = 1000000 & 0xffff00;
8843
8844 bp->timer_interval = HZ;
8845 bp->current_interval = (poll ? poll : HZ);
8846
8847 init_timer(&bp->timer);
8848 bp->timer.expires = jiffies + bp->current_interval;
8849 bp->timer.data = (unsigned long) bp;
8850 bp->timer.function = bnx2x_timer;
8851
8852 return 0;
8853
8854err_out_unmap:
8855 if (bp->regview) {
8856 iounmap(bp->regview);
8857 bp->regview = NULL;
8858 }
8859
8860 if (bp->doorbells) {
8861 iounmap(bp->doorbells);
8862 bp->doorbells = NULL;
8863 }
8864
8865err_out_release:
8866 pci_release_regions(pdev);
8867
8868err_out_disable:
8869 pci_disable_device(pdev);
8870 pci_set_drvdata(pdev, NULL);
8871
8872err_out:
8873 return rc;
8874}
8875
8876static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8877 const struct pci_device_id *ent)
8878{
8879 static int version_printed;
8880 struct net_device *dev = NULL;
8881 struct bnx2x *bp;
8882 int rc, i;
8883 int port = PCI_FUNC(pdev->devfn);
8884
8885 if (version_printed++ == 0)
8886 printk(KERN_INFO "%s", version);
8887
8888 /* dev zeroed in init_etherdev */
8889 dev = alloc_etherdev(sizeof(*bp));
8890 if (!dev)
8891 return -ENOMEM;
8892
8893 netif_carrier_off(dev);
8894
8895 bp = netdev_priv(dev);
8896 bp->msglevel = debug;
8897
8898 	if (port && onefunc) {
8899 		printk(KERN_ERR PFX "second function disabled, exiting\n");
		free_netdev(dev);	/* don't leak the netdev allocated above */
8900 		return 0;
8901 	}
8902
8903 rc = bnx2x_init_board(pdev, dev);
8904 if (rc < 0) {
8905 free_netdev(dev);
8906 return rc;
8907 }
8908
8909 dev->hard_start_xmit = bnx2x_start_xmit;
8910 dev->watchdog_timeo = TX_TIMEOUT;
8911
8912 dev->get_stats = bnx2x_get_stats;
8913 dev->ethtool_ops = &bnx2x_ethtool_ops;
8914 dev->open = bnx2x_open;
8915 dev->stop = bnx2x_close;
8916 dev->set_multicast_list = bnx2x_set_rx_mode;
8917 dev->set_mac_address = bnx2x_change_mac_addr;
8918 dev->do_ioctl = bnx2x_ioctl;
8919 dev->change_mtu = bnx2x_change_mtu;
8920 dev->tx_timeout = bnx2x_tx_timeout;
8921#ifdef BCM_VLAN
8922 dev->vlan_rx_register = bnx2x_vlan_rx_register;
8923#endif
8924#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8925 dev->poll_controller = poll_bnx2x;
8926#endif
8927 dev->features |= NETIF_F_SG;
8928 if (bp->flags & USING_DAC_FLAG)
8929 dev->features |= NETIF_F_HIGHDMA;
8930 dev->features |= NETIF_F_IP_CSUM;
8931#ifdef BCM_VLAN
8932 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8933#endif
8934 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8935
8936 rc = register_netdev(dev);
8937 if (rc) {
8938 		dev_err(&pdev->dev, "Cannot register net device\n");
8939 if (bp->regview)
8940 iounmap(bp->regview);
8941 if (bp->doorbells)
8942 iounmap(bp->doorbells);
8943 pci_release_regions(pdev);
8944 pci_disable_device(pdev);
8945 pci_set_drvdata(pdev, NULL);
8946 free_netdev(dev);
8947 return rc;
8948 }
8949
8950 pci_set_drvdata(pdev, dev);
8951
8952 bp->name = board_info[ent->driver_data].name;
8953 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz "
8954 "found at mem %lx, IRQ %d, ",
8955 dev->name, bp->name,
8956 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8957 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8958 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
8959 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
8960 bp->bus_speed_mhz,
8961 dev->base_addr,
8962 bp->pdev->irq);
8963
8964 printk("node addr ");
8965 for (i = 0; i < 6; i++)
8966 printk("%2.2x", dev->dev_addr[i]);
8967 printk("\n");
8968
8969 return 0;
8970}
8971
8972static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8973{
8974 struct net_device *dev = pci_get_drvdata(pdev);
8975 struct bnx2x *bp = netdev_priv(dev);
8976
8977 flush_scheduled_work();
8978 /*tasklet_kill(&bp->sp_task);*/
8979 unregister_netdev(dev);
8980
8981 if (bp->regview)
8982 iounmap(bp->regview);
8983
8984 if (bp->doorbells)
8985 iounmap(bp->doorbells);
8986
8987 free_netdev(dev);
8988 pci_release_regions(pdev);
8989 pci_disable_device(pdev);
8990 pci_set_drvdata(pdev, NULL);
8991}
8992
8993static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
8994{
8995 struct net_device *dev = pci_get_drvdata(pdev);
8996 struct bnx2x *bp = netdev_priv(dev);
8997 int rc;
8998
8999 if (!netif_running(dev))
9000 return 0;
9001
9002 rc = bnx2x_nic_unload(bp, 0);
9003 	if (rc)
9004 		return rc;
9005
9006 netif_device_detach(dev);
9007 pci_save_state(pdev);
9008
9009 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9010 return 0;
9011}
9012
9013static int bnx2x_resume(struct pci_dev *pdev)
9014{
9015 struct net_device *dev = pci_get_drvdata(pdev);
9016 struct bnx2x *bp = netdev_priv(dev);
9017 int rc;
9018
9019 if (!netif_running(dev))
9020 return 0;
9021
9022 pci_restore_state(pdev);
9023
9024 bnx2x_set_power_state(bp, PCI_D0);
9025 netif_device_attach(dev);
9026
9027 rc = bnx2x_nic_load(bp, 0);
9028 if (rc)
9029 return rc;
9030
9031 return 0;
9032}
9033
9034static struct pci_driver bnx2x_pci_driver = {
9035 .name = DRV_MODULE_NAME,
9036 .id_table = bnx2x_pci_tbl,
9037 .probe = bnx2x_init_one,
9038 .remove = __devexit_p(bnx2x_remove_one),
9039 .suspend = bnx2x_suspend,
9040 .resume = bnx2x_resume,
9041};
9042
9043static int __init bnx2x_init(void)
9044{
9045 return pci_register_driver(&bnx2x_pci_driver);
9046}
9047
9048static void __exit bnx2x_cleanup(void)
9049{
9050 pci_unregister_driver(&bnx2x_pci_driver);
9051}
9052
9053module_init(bnx2x_init);
9054module_exit(bnx2x_cleanup);
9055