]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x.c
[BNX2X]: correct statistics
[net-next-2.6.git] / drivers / net / bnx2x.c
CommitLineData
a2fbb9ea
ET
1/* bnx2x.c: Broadcom Everest network driver.
2 *
f1410647 3 * Copyright (c) 2007-2008 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Eliezer Tamir <eliezert@broadcom.com>
10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov
c14423fe 13 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
14 *
15 */
16
17/* define this to make the driver freeze on error
18 * to allow getting debug info
c14423fe 19 * (you will need to reboot afterwards)
a2fbb9ea
ET
20 */
21/*#define BNX2X_STOP_ON_ERROR*/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/kernel.h>
26#include <linux/device.h> /* for dev_info() */
27#include <linux/timer.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/vmalloc.h>
32#include <linux/interrupt.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/dma-mapping.h>
39#include <linux/bitops.h>
40#include <linux/irq.h>
41#include <linux/delay.h>
42#include <asm/byteorder.h>
43#include <linux/time.h>
44#include <linux/ethtool.h>
45#include <linux/mii.h>
46#ifdef NETIF_F_HW_VLAN_TX
47 #include <linux/if_vlan.h>
48 #define BCM_VLAN 1
49#endif
50#include <net/ip.h>
51#include <net/tcp.h>
52#include <net/checksum.h>
53#include <linux/workqueue.h>
54#include <linux/crc32.h>
55#include <linux/prefetch.h>
56#include <linux/zlib.h>
57#include <linux/version.h>
58#include <linux/io.h>
59
60#include "bnx2x_reg.h"
61#include "bnx2x_fw_defs.h"
62#include "bnx2x_hsi.h"
63#include "bnx2x.h"
64#include "bnx2x_init.h"
65
66#define DRV_MODULE_VERSION "0.40.15"
67#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $"
f1410647 68#define BNX2X_BC_VER 0x040200
a2fbb9ea
ET
69
70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ)
72
53a10565 73static char version[] __devinitdata =
c14423fe 74 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
a2fbb9ea
ET
75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
f1410647 81MODULE_INFO(cvs_version, "$Revision: #404 $");
a2fbb9ea
ET
82
83static int use_inta;
84static int poll;
85static int onefunc;
86static int nomcp;
87static int debug;
88static int use_multi;
89
90module_param(use_inta, int, 0);
91module_param(poll, int, 0);
92module_param(onefunc, int, 0);
93module_param(debug, int, 0);
94MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95MODULE_PARM_DESC(poll, "use polling (for debug)");
96MODULE_PARM_DESC(onefunc, "enable only first function");
c14423fe
ET
97MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea
ET
99
100#ifdef BNX2X_MULTI
101module_param(use_multi, int, 0);
102MODULE_PARM_DESC(use_multi, "use per-CPU queues");
103#endif
104
105enum bnx2x_board_type {
106 BCM57710 = 0,
107};
108
109/* indexed by board_t, above */
53a10565 110static struct {
a2fbb9ea
ET
111 char *name;
112} board_info[] __devinitdata = {
113 { "Broadcom NetXtreme II BCM57710 XGb" }
114};
115
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { 0 }
120};
121
122MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
123
124/****************************************************************************
125* General service functions
126****************************************************************************/
127
128/* used only at init
129 * locking is done by mcp
130 */
131static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
132{
133 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
134 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
136 PCICFG_VENDOR_ID_OFFSET);
137}
138
139#ifdef BNX2X_IND_RD
140static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
141{
142 u32 val;
143
144 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
145 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
147 PCICFG_VENDOR_ID_OFFSET);
148
149 return val;
150}
151#endif
152
153static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158};
159
160/* copy command into DMAE command memory and set DMAE command go */
161static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 int idx)
163{
164 u32 cmd_offset;
165 int i;
166
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
171/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
173 }
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
175}
176
177static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
178 u32 dst_addr, u32 len32)
179{
180 struct dmae_command *dmae = &bp->dmae;
181 int port = bp->port;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
183 int timeout = 200;
184
185 memset(dmae, 0, sizeof(struct dmae_command));
186
187 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
188 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
190#ifdef __BIG_ENDIAN
191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
192#else
193 DMAE_CMD_ENDIANITY_DW_SWAP |
194#endif
195 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
196 dmae->src_addr_lo = U64_LO(dma_addr);
197 dmae->src_addr_hi = U64_HI(dma_addr);
198 dmae->dst_addr_lo = dst_addr >> 2;
199 dmae->dst_addr_hi = 0;
200 dmae->len = len32;
201 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
202 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
203 dmae->comp_val = BNX2X_WB_COMP_VAL;
204
205/*
206 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
207 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
208 "dst_addr [%x:%08x (%08x)]\n"
209 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
210 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
211 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
212 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
213*/
214/*
215 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
216 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
217 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
218*/
219
220 *wb_comp = 0;
221
222 bnx2x_post_dmae(bp, dmae, port * 8);
223
224 udelay(5);
225 /* adjust timeout for emulation/FPGA */
226 if (CHIP_REV_IS_SLOW(bp))
227 timeout *= 100;
228 while (*wb_comp != BNX2X_WB_COMP_VAL) {
229/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
230 udelay(5);
231 if (!timeout) {
232 BNX2X_ERR("dmae timeout!\n");
233 break;
234 }
235 timeout--;
236 }
237}
238
239#ifdef BNX2X_DMAE_RD
240static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
241{
242 struct dmae_command *dmae = &bp->dmae;
243 int port = bp->port;
244 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
245 int timeout = 200;
246
247 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
248 memset(dmae, 0, sizeof(struct dmae_command));
249
250 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
251 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
252 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
253#ifdef __BIG_ENDIAN
254 DMAE_CMD_ENDIANITY_B_DW_SWAP |
255#else
256 DMAE_CMD_ENDIANITY_DW_SWAP |
257#endif
258 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
259 dmae->src_addr_lo = src_addr >> 2;
260 dmae->src_addr_hi = 0;
261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
263 dmae->len = len32;
264 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
265 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
266 dmae->comp_val = BNX2X_WB_COMP_VAL;
267
268/*
269 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
270 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
271 "dst_addr [%x:%08x (%08x)]\n"
272 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
273 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
274 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
275 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
276*/
277
278 *wb_comp = 0;
279
280 bnx2x_post_dmae(bp, dmae, port * 8);
281
282 udelay(5);
283 while (*wb_comp != BNX2X_WB_COMP_VAL) {
284 udelay(5);
285 if (!timeout) {
286 BNX2X_ERR("dmae timeout!\n");
287 break;
288 }
289 timeout--;
290 }
291/*
292 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
293 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
294 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
295*/
296}
297#endif
298
299static int bnx2x_mc_assert(struct bnx2x *bp)
300{
49d66772 301 int i, j, rc = 0;
a2fbb9ea
ET
302 char last_idx;
303 const char storm[] = {"XTCU"};
304 const u32 intmem_base[] = {
305 BAR_XSTRORM_INTMEM,
306 BAR_TSTRORM_INTMEM,
307 BAR_CSTRORM_INTMEM,
308 BAR_USTRORM_INTMEM
309 };
310
311 /* Go through all instances of all SEMIs */
312 for (i = 0; i < 4; i++) {
313 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
314 intmem_base[i]);
49d66772
ET
315 if (last_idx)
316 BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
317 storm[i], last_idx);
a2fbb9ea
ET
318
319 /* print the asserts */
320 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
321 u32 row0, row1, row2, row3;
322
323 row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
324 intmem_base[i]);
325 row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
326 intmem_base[i]);
327 row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
328 intmem_base[i]);
329 row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
330 intmem_base[i]);
331
332 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
49d66772 333 BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
a2fbb9ea
ET
334 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
335 storm[i], j, row3, row2, row1, row0);
336 rc++;
337 } else {
338 break;
339 }
340 }
341 }
342 return rc;
343}
c14423fe 344
a2fbb9ea
ET
345static void bnx2x_fw_dump(struct bnx2x *bp)
346{
347 u32 mark, offset;
348 u32 data[9];
349 int word;
350
351 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
352 mark = ((mark + 0x3) & ~0x3);
353 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
354
355 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
356 for (word = 0; word < 8; word++)
357 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
358 offset + 4*word));
359 data[8] = 0x0;
49d66772 360 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
361 }
362 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
363 for (word = 0; word < 8; word++)
364 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
365 offset + 4*word));
366 data[8] = 0x0;
49d66772 367 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
368 }
369 printk("\n" KERN_ERR PFX "end of fw dump\n");
370}
371
372static void bnx2x_panic_dump(struct bnx2x *bp)
373{
374 int i;
375 u16 j, start, end;
376
377 BNX2X_ERR("begin crash dump -----------------\n");
378
379 for_each_queue(bp, i) {
380 struct bnx2x_fastpath *fp = &bp->fp[i];
381 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
382
383 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
384 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
385 " *rx_cons_sb(%x) rx_comp_prod(%x)"
386 " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
387 " bd data(%x,%x)\n",
388 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
389 fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
390 fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
391 fp->fp_u_idx, hw_prods->packets_prod,
392 hw_prods->bds_prod);
393
394 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
395 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
396 for (j = start; j < end; j++) {
397 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
398
399 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
400 sw_bd->skb, sw_bd->first_bd);
401 }
402
403 start = TX_BD(fp->tx_bd_cons - 10);
404 end = TX_BD(fp->tx_bd_cons + 254);
405 for (j = start; j < end; j++) {
406 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
407
408 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
409 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
410 }
411
412 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
413 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
414 for (j = start; j < end; j++) {
415 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
416 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
417
418 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
419 j, rx_bd[0], rx_bd[1], sw_bd->skb);
420 }
421
422 start = RCQ_BD(fp->rx_comp_cons - 10);
423 end = RCQ_BD(fp->rx_comp_cons + 503);
424 for (j = start; j < end; j++) {
425 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
426
427 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
428 j, cqe[0], cqe[1], cqe[2], cqe[3]);
429 }
430 }
431
49d66772
ET
432 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
433 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 434 " spq_prod_idx(%u)\n",
49d66772 435 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
436 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
437
438
439 bnx2x_mc_assert(bp);
440 BNX2X_ERR("end crash dump -----------------\n");
441
442 bp->stats_state = STATS_STATE_DISABLE;
443 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
444}
445
446static void bnx2x_enable_int(struct bnx2x *bp)
447{
448 int port = bp->port;
449 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
450 u32 val = REG_RD(bp, addr);
451 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
452
453 if (msix) {
454 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
455 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
456 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
457 } else {
458 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
459 HC_CONFIG_0_REG_INT_LINE_EN_0 |
460 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
461 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
462 }
463
464 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n",
465 val, port, addr, msix);
466
467 REG_WR(bp, addr, val);
468}
469
470static void bnx2x_disable_int(struct bnx2x *bp)
471{
472 int port = bp->port;
473 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
474 u32 val = REG_RD(bp, addr);
475
476 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
477 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
478 HC_CONFIG_0_REG_INT_LINE_EN_0 |
479 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
480
481 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
482 val, port, addr);
483
484 REG_WR(bp, addr, val);
485 if (REG_RD(bp, addr) != val)
486 BNX2X_ERR("BUG! proper val not read from IGU!\n");
487}
488
489static void bnx2x_disable_int_sync(struct bnx2x *bp)
490{
491
492 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
493 int i;
494
495 atomic_inc(&bp->intr_sem);
c14423fe 496 /* prevent the HW from sending interrupts */
a2fbb9ea
ET
497 bnx2x_disable_int(bp);
498
499 /* make sure all ISRs are done */
500 if (msix) {
501 for_each_queue(bp, i)
502 synchronize_irq(bp->msix_table[i].vector);
503
504 /* one more for the Slow Path IRQ */
505 synchronize_irq(bp->msix_table[i].vector);
506 } else
507 synchronize_irq(bp->pdev->irq);
508
509 /* make sure sp_task is not running */
510 cancel_work_sync(&bp->sp_task);
511
512}
513
514/* fast path code */
515
516/*
517 * general service functions
518 */
519
520static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
521 u8 storm, u16 index, u8 op, u8 update)
522{
523 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
524 struct igu_ack_register igu_ack;
525
526 igu_ack.status_block_index = index;
527 igu_ack.sb_id_and_flags =
528 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
529 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
530 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
531 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
532
533/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
534 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
535 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
536}
537
538static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
539{
540 struct host_status_block *fpsb = fp->status_blk;
541 u16 rc = 0;
542
543 barrier(); /* status block is written to by the chip */
544 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
545 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
546 rc |= 1;
547 }
548 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
549 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
550 rc |= 2;
551 }
552 return rc;
553}
554
555static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
556{
557 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
558
559 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
560 rx_cons_sb++;
561
562 if ((rx_cons_sb != fp->rx_comp_cons) ||
563 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
564 return 1;
565
566 return 0;
567}
568
569static u16 bnx2x_ack_int(struct bnx2x *bp)
570{
571 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
572 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
573
574/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
575 result, BAR_IGU_INTMEM + igu_addr); */
576
577#ifdef IGU_DEBUG
578#warning IGU_DEBUG active
579 if (result == 0) {
580 BNX2X_ERR("read %x from IGU\n", result);
581 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
582 }
583#endif
584 return result;
585}
586
587
588/*
589 * fast path service functions
590 */
591
592/* free skb in the packet ring at pos idx
593 * return idx of last bd freed
594 */
595static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
596 u16 idx)
597{
598 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
599 struct eth_tx_bd *tx_bd;
600 struct sk_buff *skb = tx_buf->skb;
601 u16 bd_idx = tx_buf->first_bd;
602 int nbd;
603
604 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
605 idx, tx_buf, skb);
606
607 /* unmap first bd */
608 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
609 tx_bd = &fp->tx_desc_ring[bd_idx];
610 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
611 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
612
613 nbd = le16_to_cpu(tx_bd->nbd) - 1;
614#ifdef BNX2X_STOP_ON_ERROR
615 if (nbd > (MAX_SKB_FRAGS + 2)) {
616 BNX2X_ERR("bad nbd!\n");
617 bnx2x_panic();
618 }
619#endif
620
621 /* Skip a parse bd and the TSO split header bd
622 since they have no mapping */
623 if (nbd)
624 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
625
626 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
627 ETH_TX_BD_FLAGS_TCP_CSUM |
628 ETH_TX_BD_FLAGS_SW_LSO)) {
629 if (--nbd)
630 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
631 tx_bd = &fp->tx_desc_ring[bd_idx];
632 /* is this a TSO split header bd? */
633 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
634 if (--nbd)
635 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
636 }
637 }
638
639 /* now free frags */
640 while (nbd > 0) {
641
642 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
643 tx_bd = &fp->tx_desc_ring[bd_idx];
644 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
645 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
646 if (--nbd)
647 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
648 }
649
650 /* release skb */
651 BUG_TRAP(skb);
652 dev_kfree_skb(skb);
653 tx_buf->first_bd = 0;
654 tx_buf->skb = NULL;
655
656 return bd_idx;
657}
658
659static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
660{
661 u16 used;
662 u32 prod;
663 u32 cons;
664
665 /* Tell compiler that prod and cons can change */
666 barrier();
667 prod = fp->tx_bd_prod;
668 cons = fp->tx_bd_cons;
669
670 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
671 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
672
673 if (prod >= cons) {
674 /* used = prod - cons - prod/size + cons/size */
675 used -= NUM_TX_BD - NUM_TX_RINGS;
676 }
677
678 BUG_TRAP(used <= fp->bp->tx_ring_size);
679 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
680
681 return (fp->bp->tx_ring_size - used);
682}
683
684static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
685{
686 struct bnx2x *bp = fp->bp;
687 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
688 int done = 0;
689
690#ifdef BNX2X_STOP_ON_ERROR
691 if (unlikely(bp->panic))
692 return;
693#endif
694
695 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
696 sw_cons = fp->tx_pkt_cons;
697
698 while (sw_cons != hw_cons) {
699 u16 pkt_cons;
700
701 pkt_cons = TX_BD(sw_cons);
702
703 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
704
705 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
706 hw_cons, sw_cons, pkt_cons);
707
708/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
709 rmb();
710 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
711 }
712*/
713 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
714 sw_cons++;
715 done++;
716
717 if (done == work)
718 break;
719 }
720
721 fp->tx_pkt_cons = sw_cons;
722 fp->tx_bd_cons = bd_cons;
723
724 /* Need to make the tx_cons update visible to start_xmit()
725 * before checking for netif_queue_stopped(). Without the
726 * memory barrier, there is a small possibility that start_xmit()
727 * will miss it and cause the queue to be stopped forever.
728 */
729 smp_mb();
730
731 /* TBD need a thresh? */
732 if (unlikely(netif_queue_stopped(bp->dev))) {
733
734 netif_tx_lock(bp->dev);
735
736 if (netif_queue_stopped(bp->dev) &&
737 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
738 netif_wake_queue(bp->dev);
739
740 netif_tx_unlock(bp->dev);
741
742 }
743}
744
745static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
746 union eth_rx_cqe *rr_cqe)
747{
748 struct bnx2x *bp = fp->bp;
749 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
750 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
751
752 DP(NETIF_MSG_RX_STATUS,
753 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
754 fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
755
756 bp->spq_left++;
757
758 if (fp->index) {
759 switch (command | fp->state) {
760 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
761 BNX2X_FP_STATE_OPENING):
762 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
763 cid);
764 fp->state = BNX2X_FP_STATE_OPEN;
765 break;
766
767 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
768 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
769 cid);
770 fp->state = BNX2X_FP_STATE_HALTED;
771 break;
772
773 default:
774 BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
775 command, fp->state);
776 }
777 mb(); /* force bnx2x_wait_ramrod to see the change */
778 return;
779 }
c14423fe 780
a2fbb9ea
ET
781 switch (command | bp->state) {
782 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
783 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
784 bp->state = BNX2X_STATE_OPEN;
785 break;
786
787 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
788 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
789 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
790 fp->state = BNX2X_FP_STATE_HALTED;
791 break;
792
a2fbb9ea 793 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
49d66772
ET
794 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
795 cid);
796 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
797 break;
798
799 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
800 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
801 break;
802
49d66772
ET
803 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
804 DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
805 break;
806
a2fbb9ea
ET
807 default:
808 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
809 command, bp->state);
810 }
811
812 mb(); /* force bnx2x_wait_ramrod to see the change */
813}
814
815static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
816 struct bnx2x_fastpath *fp, u16 index)
817{
818 struct sk_buff *skb;
819 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
820 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
821 dma_addr_t mapping;
822
823 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
824 if (unlikely(skb == NULL))
825 return -ENOMEM;
826
827 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
828 PCI_DMA_FROMDEVICE);
829 if (unlikely(dma_mapping_error(mapping))) {
830
831 dev_kfree_skb(skb);
832 return -ENOMEM;
833 }
834
835 rx_buf->skb = skb;
836 pci_unmap_addr_set(rx_buf, mapping, mapping);
837
838 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
839 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
840
841 return 0;
842}
843
844/* note that we are not allocating a new skb,
845 * we are just moving one from cons to prod
846 * we are not creating a new mapping,
847 * so there is no need to check for dma_mapping_error().
848 */
849static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
850 struct sk_buff *skb, u16 cons, u16 prod)
851{
852 struct bnx2x *bp = fp->bp;
853 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
854 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
855 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
856 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
857
858 pci_dma_sync_single_for_device(bp->pdev,
859 pci_unmap_addr(cons_rx_buf, mapping),
860 bp->rx_offset + RX_COPY_THRESH,
861 PCI_DMA_FROMDEVICE);
862
863 prod_rx_buf->skb = cons_rx_buf->skb;
864 pci_unmap_addr_set(prod_rx_buf, mapping,
865 pci_unmap_addr(cons_rx_buf, mapping));
866 *prod_bd = *cons_bd;
867}
868
869static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
870{
871 struct bnx2x *bp = fp->bp;
872 u16 bd_cons, bd_prod, comp_ring_cons;
873 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
874 int rx_pkt = 0;
875
876#ifdef BNX2X_STOP_ON_ERROR
877 if (unlikely(bp->panic))
878 return 0;
879#endif
880
881 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
882 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
883 hw_comp_cons++;
884
885 bd_cons = fp->rx_bd_cons;
886 bd_prod = fp->rx_bd_prod;
887 sw_comp_cons = fp->rx_comp_cons;
888 sw_comp_prod = fp->rx_comp_prod;
889
890 /* Memory barrier necessary as speculative reads of the rx
891 * buffer can be ahead of the index in the status block
892 */
893 rmb();
894
895 DP(NETIF_MSG_RX_STATUS,
896 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
897 fp->index, hw_comp_cons, sw_comp_cons);
898
899 while (sw_comp_cons != hw_comp_cons) {
900 unsigned int len, pad;
901 struct sw_rx_bd *rx_buf;
902 struct sk_buff *skb;
903 union eth_rx_cqe *cqe;
904
905 comp_ring_cons = RCQ_BD(sw_comp_cons);
906 bd_prod = RX_BD(bd_prod);
907 bd_cons = RX_BD(bd_cons);
908
909 cqe = &fp->rx_comp_ring[comp_ring_cons];
910
911 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
912 " comp_ring (%u) bd_ring (%u,%u)\n",
913 hw_comp_cons, sw_comp_cons,
914 comp_ring_cons, bd_prod, bd_cons);
915 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
916 " queue %x vlan %x len %x\n",
917 cqe->fast_path_cqe.type,
918 cqe->fast_path_cqe.error_type_flags,
919 cqe->fast_path_cqe.status_flags,
920 cqe->fast_path_cqe.rss_hash_result,
921 cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
922
923 /* is this a slowpath msg? */
924 if (unlikely(cqe->fast_path_cqe.type)) {
925 bnx2x_sp_event(fp, cqe);
926 goto next_cqe;
927
928 /* this is an rx packet */
929 } else {
930 rx_buf = &fp->rx_buf_ring[bd_cons];
931 skb = rx_buf->skb;
932
933 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
934 pad = cqe->fast_path_cqe.placement_offset;
935
936 pci_dma_sync_single_for_device(bp->pdev,
937 pci_unmap_addr(rx_buf, mapping),
938 pad + RX_COPY_THRESH,
939 PCI_DMA_FROMDEVICE);
940 prefetch(skb);
941 prefetch(((char *)(skb)) + 128);
942
943 /* is this an error packet? */
944 if (unlikely(cqe->fast_path_cqe.error_type_flags &
945 ETH_RX_ERROR_FALGS)) {
946 /* do we sometimes forward error packets anyway? */
947 DP(NETIF_MSG_RX_ERR,
948 "ERROR flags(%u) Rx packet(%u)\n",
949 cqe->fast_path_cqe.error_type_flags,
950 sw_comp_cons);
951 /* TBD make sure MC counts this as a drop */
952 goto reuse_rx;
953 }
954
955 /* Since we don't have a jumbo ring
956 * copy small packets if mtu > 1500
957 */
958 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
959 (len <= RX_COPY_THRESH)) {
960 struct sk_buff *new_skb;
961
962 new_skb = netdev_alloc_skb(bp->dev,
963 len + pad);
964 if (new_skb == NULL) {
965 DP(NETIF_MSG_RX_ERR,
966 "ERROR packet dropped "
967 "because of alloc failure\n");
968 /* TBD count this as a drop? */
969 goto reuse_rx;
970 }
971
972 /* aligned copy */
973 skb_copy_from_linear_data_offset(skb, pad,
974 new_skb->data + pad, len);
975 skb_reserve(new_skb, pad);
976 skb_put(new_skb, len);
977
978 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
979
980 skb = new_skb;
981
982 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
983 pci_unmap_single(bp->pdev,
984 pci_unmap_addr(rx_buf, mapping),
985 bp->rx_buf_use_size,
986 PCI_DMA_FROMDEVICE);
987 skb_reserve(skb, pad);
988 skb_put(skb, len);
989
990 } else {
991 DP(NETIF_MSG_RX_ERR,
992 "ERROR packet dropped because "
993 "of alloc failure\n");
994reuse_rx:
995 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
996 goto next_rx;
997 }
998
999 skb->protocol = eth_type_trans(skb, bp->dev);
1000
1001 skb->ip_summed = CHECKSUM_NONE;
1002 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1003 skb->ip_summed = CHECKSUM_UNNECESSARY;
1004
1005 /* TBD do we pass bad csum packets in promisc */
1006 }
1007
1008#ifdef BCM_VLAN
1009 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1010 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1011 && (bp->vlgrp != NULL))
1012 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1013 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1014 else
1015#endif
1016 netif_receive_skb(skb);
1017
1018 bp->dev->last_rx = jiffies;
1019
1020next_rx:
1021 rx_buf->skb = NULL;
1022
1023 bd_cons = NEXT_RX_IDX(bd_cons);
1024 bd_prod = NEXT_RX_IDX(bd_prod);
1025next_cqe:
1026 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1027 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1028 rx_pkt++;
1029
1030 if ((rx_pkt == budget))
1031 break;
1032 } /* while */
1033
1034 fp->rx_bd_cons = bd_cons;
1035 fp->rx_bd_prod = bd_prod;
1036 fp->rx_comp_cons = sw_comp_cons;
1037 fp->rx_comp_prod = sw_comp_prod;
1038
1039 REG_WR(bp, BAR_TSTRORM_INTMEM +
1040 TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1041
1042 mmiowb(); /* keep prod updates ordered */
1043
1044 fp->rx_pkt += rx_pkt;
1045 fp->rx_calls++;
1046
1047 return rx_pkt;
1048}
1049
1050static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1051{
1052 struct bnx2x_fastpath *fp = fp_cookie;
1053 struct bnx2x *bp = fp->bp;
1054 struct net_device *dev = bp->dev;
1055 int index = fp->index;
1056
1057 DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1058 bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1059
1060#ifdef BNX2X_STOP_ON_ERROR
1061 if (unlikely(bp->panic))
1062 return IRQ_HANDLED;
1063#endif
1064
1065 prefetch(fp->rx_cons_sb);
1066 prefetch(fp->tx_cons_sb);
1067 prefetch(&fp->status_blk->c_status_block.status_block_index);
1068 prefetch(&fp->status_blk->u_status_block.status_block_index);
1069
1070 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1071 return IRQ_HANDLED;
1072}
1073
1074static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1075{
1076 struct net_device *dev = dev_instance;
1077 struct bnx2x *bp = netdev_priv(dev);
1078 u16 status = bnx2x_ack_int(bp);
1079
1080 if (unlikely(status == 0)) {
1081 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1082 return IRQ_NONE;
1083 }
1084
1085 DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1086
1087#ifdef BNX2X_STOP_ON_ERROR
1088 if (unlikely(bp->panic))
1089 return IRQ_HANDLED;
1090#endif
1091
1092 /* Return here if interrupt is shared and is disabled */
1093 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1094 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1095 return IRQ_HANDLED;
1096 }
1097
1098 if (status & 0x2) {
1099 struct bnx2x_fastpath *fp = &bp->fp[0];
1100
1101 prefetch(fp->rx_cons_sb);
1102 prefetch(fp->tx_cons_sb);
1103 prefetch(&fp->status_blk->c_status_block.status_block_index);
1104 prefetch(&fp->status_blk->u_status_block.status_block_index);
1105
1106 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1107
1108 status &= ~0x2;
1109 if (!status)
1110 return IRQ_HANDLED;
1111 }
1112
1113 if (unlikely(status & 0x1)) {
1114
1115 schedule_work(&bp->sp_task);
1116
1117 status &= ~0x1;
1118 if (!status)
1119 return IRQ_HANDLED;
1120 }
1121
1122 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1123 status);
1124
1125 return IRQ_HANDLED;
1126}
1127
1128/* end of fast path */
1129
1130/* PHY/MAC */
1131
1132/*
1133 * General service functions
1134 */
1135
1136static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1137{
1138 int port = bp->port;
1139
1140 NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1141 ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1142 SHARED_HW_CFG_LED_MODE_SHIFT));
1143 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1144
1145 /* Set blinking rate to ~15.9Hz */
1146 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1147 LED_BLINK_RATE_VAL);
1148 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1149
1150 /* On Ax chip versions for speeds less than 10G
1151 LED scheme is different */
1152 if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1153 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1154 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1155 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
1156 }
1157}
1158
1159static void bnx2x_leds_unset(struct bnx2x *bp)
1160{
1161 int port = bp->port;
1162
1163 NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
1164 NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
1165}
1166
1167static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1168{
1169 u32 val = REG_RD(bp, reg);
1170
1171 val |= bits;
1172 REG_WR(bp, reg, val);
1173 return val;
1174}
1175
1176static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1177{
1178 u32 val = REG_RD(bp, reg);
1179
1180 val &= ~bits;
1181 REG_WR(bp, reg, val);
1182 return val;
1183}
1184
f1410647
ET
1185static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1186{
1187 u32 cnt;
1188 u32 lock_status;
1189 u32 resource_bit = (1 << resource);
1190 u8 func = bp->port;
1191
1192 /* Validating that the resource is within range */
1193 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1194 DP(NETIF_MSG_HW,
1195 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1196 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1197 return -EINVAL;
1198 }
1199
1200 /* Validating that the resource is not already taken */
1201 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1202 if (lock_status & resource_bit) {
1203 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1204 lock_status, resource_bit);
1205 return -EEXIST;
1206 }
1207
1208 /* Try for 1 second every 5ms */
1209 for (cnt = 0; cnt < 200; cnt++) {
1210 /* Try to acquire the lock */
1211 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
1212 resource_bit);
1213 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1214 if (lock_status & resource_bit)
1215 return 0;
1216
1217 msleep(5);
1218 }
1219 DP(NETIF_MSG_HW, "Timeout\n");
1220 return -EAGAIN;
1221}
1222
1223static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1224{
1225 u32 lock_status;
1226 u32 resource_bit = (1 << resource);
1227 u8 func = bp->port;
1228
1229 /* Validating that the resource is within range */
1230 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1231 DP(NETIF_MSG_HW,
1232 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1233 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1234 return -EINVAL;
1235 }
1236
1237 /* Validating that the resource is currently taken */
1238 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1239 if (!(lock_status & resource_bit)) {
1240 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1241 lock_status, resource_bit);
1242 return -EFAULT;
1243 }
1244
1245 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
1246 return 0;
1247}
1248
1249static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1250{
1251 /* The GPIO should be swapped if swap register is set and active */
1252 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1253 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1254 int gpio_shift = gpio_num +
1255 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1256 u32 gpio_mask = (1 << gpio_shift);
1257 u32 gpio_reg;
1258
1259 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1260 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1261 return -EINVAL;
1262 }
1263
1264 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1265 /* read GPIO and mask except the float bits */
1266 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1267
1268 switch (mode) {
1269 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1270 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1271 gpio_num, gpio_shift);
1272 /* clear FLOAT and set CLR */
1273 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1274 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1275 break;
1276
1277 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1278 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1279 gpio_num, gpio_shift);
1280 /* clear FLOAT and set SET */
1281 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1282 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1283 break;
1284
1285 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1286 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1287 gpio_num, gpio_shift);
1288 /* set FLOAT */
1289 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1290 break;
1291
1292 default:
1293 break;
1294 }
1295
1296 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1297 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1298
1299 return 0;
1300}
1301
1302static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1303{
1304 u32 spio_mask = (1 << spio_num);
1305 u32 spio_reg;
1306
1307 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1308 (spio_num > MISC_REGISTERS_SPIO_7)) {
1309 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1310 return -EINVAL;
1311 }
1312
1313 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1314 /* read SPIO and mask except the float bits */
1315 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1316
1317 switch (mode) {
1318 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1319 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1320 /* clear FLOAT and set CLR */
1321 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1322 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1323 break;
1324
1325 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1326 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1327 /* clear FLOAT and set SET */
1328 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1329 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1330 break;
1331
1332 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1333 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1334 /* set FLOAT */
1335 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1336 break;
1337
1338 default:
1339 break;
1340 }
1341
1342 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1343 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1344
1345 return 0;
1346}
1347
a2fbb9ea
ET
1348static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1349{
a2fbb9ea
ET
1350 int port = bp->port;
1351 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
f1410647
ET
1352 u32 tmp;
1353 int i, rc;
a2fbb9ea
ET
1354
1355/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1356 bp->phy_addr, reg, val); */
1357
1358 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1359
1360 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1361 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1362 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1363 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1364 udelay(40);
1365 }
1366
1367 tmp = ((bp->phy_addr << 21) | (reg << 16) |
1368 (val & EMAC_MDIO_COMM_DATA) |
1369 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1370 EMAC_MDIO_COMM_START_BUSY);
1371 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1372
1373 for (i = 0; i < 50; i++) {
1374 udelay(10);
1375
1376 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1377 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1378 udelay(5);
1379 break;
1380 }
1381 }
1382
1383 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1384 BNX2X_ERR("write phy register failed\n");
1385
1386 rc = -EBUSY;
1387 } else {
1388 rc = 0;
1389 }
1390
1391 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1392
1393 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1394 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1395 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1396 }
1397
1398 return rc;
1399}
1400
1401static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1402{
1403 int port = bp->port;
1404 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
f1410647
ET
1405 u32 val;
1406 int i, rc;
a2fbb9ea
ET
1407
1408 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1409
1410 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1411 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1412 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1413 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1414 udelay(40);
1415 }
1416
1417 val = ((bp->phy_addr << 21) | (reg << 16) |
1418 EMAC_MDIO_COMM_COMMAND_READ_22 |
1419 EMAC_MDIO_COMM_START_BUSY);
1420 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1421
1422 for (i = 0; i < 50; i++) {
1423 udelay(10);
1424
1425 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1426 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1427 val &= EMAC_MDIO_COMM_DATA;
1428 break;
1429 }
1430 }
1431
1432 if (val & EMAC_MDIO_COMM_START_BUSY) {
1433 BNX2X_ERR("read phy register failed\n");
1434
1435 *ret_val = 0x0;
1436 rc = -EBUSY;
1437 } else {
1438 *ret_val = val;
1439 rc = 0;
1440 }
1441
1442 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1443
1444 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1445 val |= EMAC_MDIO_MODE_AUTO_POLL;
1446 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1447 }
1448
1449/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
1450 bp->phy_addr, reg, *ret_val); */
1451
1452 return rc;
1453}
1454
f1410647
ET
1455static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
1456 u32 phy_addr, u32 reg, u32 addr, u32 val)
a2fbb9ea 1457{
f1410647
ET
1458 u32 tmp;
1459 int i, rc = 0;
a2fbb9ea 1460
f1410647
ET
1461 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1462 * (a value of 49==0x31) and make sure that the AUTO poll is off
1463 */
1464 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1465 tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1466 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
1467 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1468 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1469 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1470 udelay(40);
a2fbb9ea
ET
1471
1472 /* address */
f1410647 1473 tmp = ((phy_addr << 21) | (reg << 16) | addr |
a2fbb9ea
ET
1474 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1475 EMAC_MDIO_COMM_START_BUSY);
f1410647 1476 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
a2fbb9ea
ET
1477
1478 for (i = 0; i < 50; i++) {
1479 udelay(10);
1480
f1410647 1481 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1482 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1483 udelay(5);
1484 break;
1485 }
1486 }
a2fbb9ea
ET
1487 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1488 BNX2X_ERR("write phy register failed\n");
1489
1490 rc = -EBUSY;
f1410647 1491
a2fbb9ea
ET
1492 } else {
1493 /* data */
f1410647 1494 tmp = ((phy_addr << 21) | (reg << 16) | val |
a2fbb9ea
ET
1495 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1496 EMAC_MDIO_COMM_START_BUSY);
f1410647 1497 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
a2fbb9ea
ET
1498
1499 for (i = 0; i < 50; i++) {
1500 udelay(10);
1501
f1410647 1502 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1503 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1504 udelay(5);
1505 break;
1506 }
1507 }
1508
1509 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1510 BNX2X_ERR("write phy register failed\n");
1511
1512 rc = -EBUSY;
1513 }
1514 }
1515
f1410647
ET
1516 /* unset clause 45 mode, set the MDIO clock to a faster value
1517 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1518 */
1519 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1520 tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1521 tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1522 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
a2fbb9ea 1523 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
f1410647 1524 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
a2fbb9ea
ET
1525
1526 return rc;
1527}
1528
f1410647
ET
1529static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1530 u32 addr, u32 val)
a2fbb9ea 1531{
f1410647 1532 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 1533
f1410647
ET
1534 return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1535 reg, addr, val);
1536}
a2fbb9ea 1537
f1410647
ET
1538static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
1539 u32 phy_addr, u32 reg, u32 addr,
1540 u32 *ret_val)
1541{
1542 u32 val;
1543 int i, rc = 0;
a2fbb9ea 1544
f1410647
ET
1545 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1546 * (a value of 49==0x31) and make sure that the AUTO poll is off
1547 */
1548 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1549 val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1550 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1551 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1552 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1553 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1554 udelay(40);
a2fbb9ea
ET
1555
1556 /* address */
f1410647 1557 val = ((phy_addr << 21) | (reg << 16) | addr |
a2fbb9ea
ET
1558 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1559 EMAC_MDIO_COMM_START_BUSY);
f1410647 1560 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
a2fbb9ea
ET
1561
1562 for (i = 0; i < 50; i++) {
1563 udelay(10);
1564
f1410647 1565 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1566 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1567 udelay(5);
1568 break;
1569 }
1570 }
a2fbb9ea
ET
1571 if (val & EMAC_MDIO_COMM_START_BUSY) {
1572 BNX2X_ERR("read phy register failed\n");
1573
1574 *ret_val = 0;
1575 rc = -EBUSY;
f1410647 1576
a2fbb9ea
ET
1577 } else {
1578 /* data */
f1410647 1579 val = ((phy_addr << 21) | (reg << 16) |
a2fbb9ea
ET
1580 EMAC_MDIO_COMM_COMMAND_READ_45 |
1581 EMAC_MDIO_COMM_START_BUSY);
f1410647 1582 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
a2fbb9ea
ET
1583
1584 for (i = 0; i < 50; i++) {
1585 udelay(10);
1586
f1410647 1587 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
a2fbb9ea
ET
1588 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1589 val &= EMAC_MDIO_COMM_DATA;
1590 break;
1591 }
1592 }
1593
1594 if (val & EMAC_MDIO_COMM_START_BUSY) {
1595 BNX2X_ERR("read phy register failed\n");
1596
1597 val = 0;
1598 rc = -EBUSY;
1599 }
1600
1601 *ret_val = val;
1602 }
1603
f1410647
ET
1604 /* unset clause 45 mode, set the MDIO clock to a faster value
1605 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1606 */
1607 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1608 val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1609 val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1610 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
a2fbb9ea 1611 val |= EMAC_MDIO_MODE_AUTO_POLL;
f1410647 1612 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
a2fbb9ea
ET
1613
1614 return rc;
1615}
1616
f1410647
ET
1617static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1618 u32 addr, u32 *ret_val)
1619{
1620 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1621
1622 return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1623 reg, addr, ret_val);
1624}
1625
1626static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
1627 u32 addr, u32 val)
a2fbb9ea
ET
1628{
1629 int i;
1630 u32 rd_val;
1631
1632 might_sleep();
1633 for (i = 0; i < 10; i++) {
f1410647 1634 bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
a2fbb9ea 1635 msleep(5);
f1410647 1636 bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
a2fbb9ea
ET
1637 /* if the read value is not the same as the value we wrote,
1638 we should write it again */
1639 if (rd_val == val)
1640 return 0;
1641 }
1642 BNX2X_ERR("MDIO write in CL45 failed\n");
1643 return -EBUSY;
1644}
1645
1646/*
c14423fe 1647 * link management
a2fbb9ea
ET
1648 */
1649
f1410647
ET
1650static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1651{
1652 switch (pause_result) { /* ASYM P ASYM P */
1653 case 0xb: /* 1 0 1 1 */
1654 bp->flow_ctrl = FLOW_CTRL_TX;
1655 break;
1656
1657 case 0xe: /* 1 1 1 0 */
1658 bp->flow_ctrl = FLOW_CTRL_RX;
1659 break;
1660
1661 case 0x5: /* 0 1 0 1 */
1662 case 0x7: /* 0 1 1 1 */
1663 case 0xd: /* 1 1 0 1 */
1664 case 0xf: /* 1 1 1 1 */
1665 bp->flow_ctrl = FLOW_CTRL_BOTH;
1666 break;
1667
1668 default:
1669 break;
1670 }
1671}
1672
1673static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
1674{
1675 u32 ext_phy_addr;
1676 u32 ld_pause; /* local */
1677 u32 lp_pause; /* link partner */
1678 u32 an_complete; /* AN complete */
1679 u32 pause_result;
1680 u8 ret = 0;
1681
1682 ext_phy_addr = ((bp->ext_phy_config &
1683 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1684 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1685
1686 /* read twice */
1687 bnx2x_mdio45_read(bp, ext_phy_addr,
1688 EXT_PHY_KR_AUTO_NEG_DEVAD,
1689 EXT_PHY_KR_STATUS, &an_complete);
1690 bnx2x_mdio45_read(bp, ext_phy_addr,
1691 EXT_PHY_KR_AUTO_NEG_DEVAD,
1692 EXT_PHY_KR_STATUS, &an_complete);
1693
1694 if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
1695 ret = 1;
1696 bnx2x_mdio45_read(bp, ext_phy_addr,
1697 EXT_PHY_KR_AUTO_NEG_DEVAD,
1698 EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
1699 bnx2x_mdio45_read(bp, ext_phy_addr,
1700 EXT_PHY_KR_AUTO_NEG_DEVAD,
1701 EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
1702 pause_result = (ld_pause &
1703 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
1704 pause_result |= (lp_pause &
1705 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
1706 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1707 pause_result);
1708 bnx2x_pause_resolve(bp, pause_result);
1709 }
1710 return ret;
1711}
1712
a2fbb9ea
ET
1713static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1714{
f1410647
ET
1715 u32 ld_pause; /* local driver */
1716 u32 lp_pause; /* link partner */
a2fbb9ea
ET
1717 u32 pause_result;
1718
1719 bp->flow_ctrl = 0;
1720
c14423fe 1721 /* resolve from gp_status in case of AN complete and not sgmii */
a2fbb9ea
ET
1722 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1723 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1724 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
1725 (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1726
1727 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
1728 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1729 &ld_pause);
1730 bnx2x_mdio22_read(bp,
1731 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1732 &lp_pause);
1733 pause_result = (ld_pause &
1734 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1735 pause_result |= (lp_pause &
1736 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1737 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
f1410647
ET
1738 bnx2x_pause_resolve(bp, pause_result);
1739 } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
1740 !(bnx2x_ext_phy_resove_fc(bp))) {
1741 /* forced speed */
1742 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
1743 switch (bp->req_flow_ctrl) {
1744 case FLOW_CTRL_AUTO:
1745 if (bp->dev->mtu <= 4500)
1746 bp->flow_ctrl = FLOW_CTRL_BOTH;
1747 else
1748 bp->flow_ctrl = FLOW_CTRL_TX;
1749 break;
a2fbb9ea 1750
f1410647
ET
1751 case FLOW_CTRL_TX:
1752 bp->flow_ctrl = FLOW_CTRL_TX;
1753 break;
a2fbb9ea 1754
f1410647
ET
1755 case FLOW_CTRL_RX:
1756 if (bp->dev->mtu <= 4500)
1757 bp->flow_ctrl = FLOW_CTRL_RX;
1758 break;
a2fbb9ea 1759
f1410647
ET
1760 case FLOW_CTRL_BOTH:
1761 if (bp->dev->mtu <= 4500)
1762 bp->flow_ctrl = FLOW_CTRL_BOTH;
1763 else
1764 bp->flow_ctrl = FLOW_CTRL_TX;
1765 break;
a2fbb9ea 1766
f1410647
ET
1767 case FLOW_CTRL_NONE:
1768 default:
1769 break;
1770 }
1771 } else { /* forced mode */
1772 switch (bp->req_flow_ctrl) {
1773 case FLOW_CTRL_AUTO:
1774 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
1775 " req_autoneg 0x%x\n",
1776 bp->req_flow_ctrl, bp->req_autoneg);
1777 break;
a2fbb9ea 1778
f1410647
ET
1779 case FLOW_CTRL_TX:
1780 case FLOW_CTRL_RX:
1781 case FLOW_CTRL_BOTH:
1782 bp->flow_ctrl = bp->req_flow_ctrl;
1783 break;
a2fbb9ea 1784
f1410647
ET
1785 case FLOW_CTRL_NONE:
1786 default:
1787 break;
1788 }
a2fbb9ea
ET
1789 }
1790 }
1791 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
1792}
1793
1794static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1795{
1796 bp->link_status = 0;
1797
1798 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
f1410647 1799 DP(NETIF_MSG_LINK, "phy link up\n");
a2fbb9ea 1800
f1410647 1801 bp->phy_link_up = 1;
a2fbb9ea
ET
1802 bp->link_status |= LINK_STATUS_LINK_UP;
1803
1804 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1805 bp->duplex = DUPLEX_FULL;
1806 else
1807 bp->duplex = DUPLEX_HALF;
1808
1809 bnx2x_flow_ctrl_resolve(bp, gp_status);
1810
1811 switch (gp_status & GP_STATUS_SPEED_MASK) {
1812 case GP_STATUS_10M:
1813 bp->line_speed = SPEED_10;
1814 if (bp->duplex == DUPLEX_FULL)
1815 bp->link_status |= LINK_10TFD;
1816 else
1817 bp->link_status |= LINK_10THD;
1818 break;
1819
1820 case GP_STATUS_100M:
1821 bp->line_speed = SPEED_100;
1822 if (bp->duplex == DUPLEX_FULL)
1823 bp->link_status |= LINK_100TXFD;
1824 else
1825 bp->link_status |= LINK_100TXHD;
1826 break;
1827
1828 case GP_STATUS_1G:
1829 case GP_STATUS_1G_KX:
1830 bp->line_speed = SPEED_1000;
1831 if (bp->duplex == DUPLEX_FULL)
1832 bp->link_status |= LINK_1000TFD;
1833 else
1834 bp->link_status |= LINK_1000THD;
1835 break;
1836
1837 case GP_STATUS_2_5G:
1838 bp->line_speed = SPEED_2500;
1839 if (bp->duplex == DUPLEX_FULL)
1840 bp->link_status |= LINK_2500TFD;
1841 else
1842 bp->link_status |= LINK_2500THD;
1843 break;
1844
1845 case GP_STATUS_5G:
1846 case GP_STATUS_6G:
1847 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1848 gp_status);
1849 break;
1850
1851 case GP_STATUS_10G_KX4:
1852 case GP_STATUS_10G_HIG:
1853 case GP_STATUS_10G_CX4:
1854 bp->line_speed = SPEED_10000;
1855 bp->link_status |= LINK_10GTFD;
1856 break;
1857
1858 case GP_STATUS_12G_HIG:
1859 bp->line_speed = SPEED_12000;
1860 bp->link_status |= LINK_12GTFD;
1861 break;
1862
1863 case GP_STATUS_12_5G:
1864 bp->line_speed = SPEED_12500;
1865 bp->link_status |= LINK_12_5GTFD;
1866 break;
1867
1868 case GP_STATUS_13G:
1869 bp->line_speed = SPEED_13000;
1870 bp->link_status |= LINK_13GTFD;
1871 break;
1872
1873 case GP_STATUS_15G:
1874 bp->line_speed = SPEED_15000;
1875 bp->link_status |= LINK_15GTFD;
1876 break;
1877
1878 case GP_STATUS_16G:
1879 bp->line_speed = SPEED_16000;
1880 bp->link_status |= LINK_16GTFD;
1881 break;
1882
1883 default:
1884 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1885 gp_status);
1886 break;
1887 }
1888
1889 bp->link_status |= LINK_STATUS_SERDES_LINK;
1890
1891 if (bp->req_autoneg & AUTONEG_SPEED) {
1892 bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1893
1894 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1895 bp->link_status |=
1896 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1897
1898 if (bp->autoneg & AUTONEG_PARALLEL)
1899 bp->link_status |=
1900 LINK_STATUS_PARALLEL_DETECTION_USED;
1901 }
1902
1903 if (bp->flow_ctrl & FLOW_CTRL_TX)
1904 bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1905
1906 if (bp->flow_ctrl & FLOW_CTRL_RX)
1907 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1908
1909 } else { /* link_down */
f1410647 1910 DP(NETIF_MSG_LINK, "phy link down\n");
a2fbb9ea 1911
f1410647 1912 bp->phy_link_up = 0;
a2fbb9ea
ET
1913
1914 bp->line_speed = 0;
1915 bp->duplex = DUPLEX_FULL;
1916 bp->flow_ctrl = 0;
1917 }
1918
f1410647 1919 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
a2fbb9ea
ET
1920 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1921 " link_status 0x%x\n",
f1410647
ET
1922 gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
1923 bp->flow_ctrl, bp->link_status);
a2fbb9ea
ET
1924}
1925
1926static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1927{
1928 int port = bp->port;
1929
1930 /* first reset all status
c14423fe 1931 * we assume only one line will be change at a time */
a2fbb9ea 1932 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647
ET
1933 (NIG_STATUS_XGXS0_LINK10G |
1934 NIG_STATUS_XGXS0_LINK_STATUS |
1935 NIG_STATUS_SERDES0_LINK_STATUS));
1936 if (bp->phy_link_up) {
a2fbb9ea
ET
1937 if (is_10g) {
1938 /* Disable the 10G link interrupt
1939 * by writing 1 to the status register
1940 */
f1410647 1941 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
a2fbb9ea
ET
1942 bnx2x_bits_en(bp,
1943 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1944 NIG_STATUS_XGXS0_LINK10G);
a2fbb9ea
ET
1945
1946 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1947 /* Disable the link interrupt
1948 * by writing 1 to the relevant lane
1949 * in the status register
1950 */
f1410647 1951 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
a2fbb9ea
ET
1952 bnx2x_bits_en(bp,
1953 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1954 ((1 << bp->ser_lane) <<
f1410647 1955 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
a2fbb9ea
ET
1956
1957 } else { /* SerDes */
f1410647 1958 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
a2fbb9ea
ET
1959 /* Disable the link interrupt
1960 * by writing 1 to the status register
1961 */
1962 bnx2x_bits_en(bp,
1963 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1964 NIG_STATUS_SERDES0_LINK_STATUS);
a2fbb9ea
ET
1965 }
1966
1967 } else { /* link_down */
1968 }
1969}
1970
1971static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1972{
1973 u32 ext_phy_type;
1974 u32 ext_phy_addr;
f1410647 1975 u32 val1 = 0, val2;
a2fbb9ea
ET
1976 u32 rx_sd, pcs_status;
1977
1978 if (bp->phy_flags & PHY_XGXS_FLAG) {
a2fbb9ea
ET
1979 ext_phy_addr = ((bp->ext_phy_config &
1980 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1981 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
a2fbb9ea
ET
1982
1983 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1984 switch (ext_phy_type) {
1985 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1986 DP(NETIF_MSG_LINK, "XGXS Direct\n");
f1410647 1987 val1 = 1;
a2fbb9ea
ET
1988 break;
1989
1990 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1991 DP(NETIF_MSG_LINK, "XGXS 8705\n");
f1410647
ET
1992 bnx2x_mdio45_read(bp, ext_phy_addr,
1993 EXT_PHY_OPT_WIS_DEVAD,
1994 EXT_PHY_OPT_LASI_STATUS, &val1);
1995 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
1996
1997 bnx2x_mdio45_read(bp, ext_phy_addr,
1998 EXT_PHY_OPT_WIS_DEVAD,
1999 EXT_PHY_OPT_LASI_STATUS, &val1);
2000 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2001
2002 bnx2x_mdio45_read(bp, ext_phy_addr,
2003 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2004 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
f1410647
ET
2005 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2006 val1 = (rx_sd & 0x1);
a2fbb9ea
ET
2007 break;
2008
2009 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2010 DP(NETIF_MSG_LINK, "XGXS 8706\n");
f1410647
ET
2011 bnx2x_mdio45_read(bp, ext_phy_addr,
2012 EXT_PHY_OPT_PMA_PMD_DEVAD,
2013 EXT_PHY_OPT_LASI_STATUS, &val1);
2014 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2015
2016 bnx2x_mdio45_read(bp, ext_phy_addr,
2017 EXT_PHY_OPT_PMA_PMD_DEVAD,
2018 EXT_PHY_OPT_LASI_STATUS, &val1);
2019 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2020
2021 bnx2x_mdio45_read(bp, ext_phy_addr,
2022 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2023 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
f1410647
ET
2024 bnx2x_mdio45_read(bp, ext_phy_addr,
2025 EXT_PHY_OPT_PCS_DEVAD,
2026 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2027 bnx2x_mdio45_read(bp, ext_phy_addr,
2028 EXT_PHY_AUTO_NEG_DEVAD,
2029 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2030
a2fbb9ea 2031 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
f1410647
ET
2032 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
2033 rx_sd, pcs_status, val2, (val2 & (1<<1)));
2034 /* link is up if both bit 0 of pmd_rx_sd and
2035 * bit 0 of pcs_status are set, or if the autoneg bit
2036 1 is set
2037 */
2038 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2039 break;
2040
2041 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2042 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2043
2044 /* clear the interrupt LASI status register */
2045 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2046 ext_phy_addr,
2047 EXT_PHY_KR_PCS_DEVAD,
2048 EXT_PHY_KR_LASI_STATUS, &val2);
2049 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2050 ext_phy_addr,
2051 EXT_PHY_KR_PCS_DEVAD,
2052 EXT_PHY_KR_LASI_STATUS, &val1);
2053 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2054 val2, val1);
2055 /* Check the LASI */
2056 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2057 ext_phy_addr,
2058 EXT_PHY_KR_PMA_PMD_DEVAD,
2059 0x9003, &val2);
2060 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2061 ext_phy_addr,
2062 EXT_PHY_KR_PMA_PMD_DEVAD,
2063 0x9003, &val1);
2064 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2065 val2, val1);
2066 /* Check the link status */
2067 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2068 ext_phy_addr,
2069 EXT_PHY_KR_PCS_DEVAD,
2070 EXT_PHY_KR_PCS_STATUS, &val2);
2071 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2072 /* Check the link status on 1.1.2 */
2073 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2074 ext_phy_addr,
2075 EXT_PHY_OPT_PMA_PMD_DEVAD,
2076 EXT_PHY_KR_STATUS, &val2);
2077 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2078 ext_phy_addr,
2079 EXT_PHY_OPT_PMA_PMD_DEVAD,
2080 EXT_PHY_KR_STATUS, &val1);
2081 DP(NETIF_MSG_LINK,
2082 "KR PMA status 0x%x->0x%x\n", val2, val1);
2083 val1 = ((val1 & 4) == 4);
2084 /* If 1G was requested assume the link is up */
2085 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2086 (bp->req_line_speed == SPEED_1000))
2087 val1 = 1;
2088 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2089 break;
2090
2091 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2092 bnx2x_mdio45_read(bp, ext_phy_addr,
2093 EXT_PHY_OPT_PMA_PMD_DEVAD,
2094 EXT_PHY_OPT_LASI_STATUS, &val2);
2095 bnx2x_mdio45_read(bp, ext_phy_addr,
2096 EXT_PHY_OPT_PMA_PMD_DEVAD,
2097 EXT_PHY_OPT_LASI_STATUS, &val1);
2098 DP(NETIF_MSG_LINK,
2099 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2100 bnx2x_mdio45_read(bp, ext_phy_addr,
2101 EXT_PHY_OPT_PMA_PMD_DEVAD,
2102 EXT_PHY_KR_STATUS, &val2);
2103 bnx2x_mdio45_read(bp, ext_phy_addr,
2104 EXT_PHY_OPT_PMA_PMD_DEVAD,
2105 EXT_PHY_KR_STATUS, &val1);
2106 DP(NETIF_MSG_LINK,
2107 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2108 val1 = ((val1 & 4) == 4);
2109 /* if link is up
2110 * print the AN outcome of the SFX7101 PHY
a2fbb9ea 2111 */
f1410647
ET
2112 if (val1) {
2113 bnx2x_mdio45_read(bp, ext_phy_addr,
2114 EXT_PHY_KR_AUTO_NEG_DEVAD,
2115 0x21, &val2);
2116 DP(NETIF_MSG_LINK,
2117 "SFX7101 AN status 0x%x->%s\n", val2,
2118 (val2 & (1<<14)) ? "Master" : "Slave");
2119 }
a2fbb9ea
ET
2120 break;
2121
2122 default:
2123 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2124 bp->ext_phy_config);
f1410647 2125 val1 = 0;
a2fbb9ea
ET
2126 break;
2127 }
a2fbb9ea
ET
2128
2129 } else { /* SerDes */
2130 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2131 switch (ext_phy_type) {
2132 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2133 DP(NETIF_MSG_LINK, "SerDes Direct\n");
f1410647 2134 val1 = 1;
a2fbb9ea
ET
2135 break;
2136
2137 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2138 DP(NETIF_MSG_LINK, "SerDes 5482\n");
f1410647 2139 val1 = 1;
a2fbb9ea
ET
2140 break;
2141
2142 default:
2143 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2144 bp->ext_phy_config);
f1410647 2145 val1 = 0;
a2fbb9ea
ET
2146 break;
2147 }
2148 }
2149
f1410647 2150 return val1;
a2fbb9ea
ET
2151}
2152
2153static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
2154{
2155 int port = bp->port;
2156 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2157 NIG_REG_INGRESS_BMAC0_MEM;
2158 u32 wb_write[2];
2159 u32 val;
2160
c14423fe 2161 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
a2fbb9ea
ET
2162 /* reset and unreset the BigMac */
2163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2164 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2165 msleep(5);
2166 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2167 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2168
2169 /* enable access for bmac registers */
2170 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2171
2172 /* XGXS control */
2173 wb_write[0] = 0x3c;
2174 wb_write[1] = 0;
2175 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2176 wb_write, 2);
2177
2178 /* tx MAC SA */
2179 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
2180 (bp->dev->dev_addr[3] << 16) |
2181 (bp->dev->dev_addr[4] << 8) |
2182 bp->dev->dev_addr[5]);
2183 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
2184 bp->dev->dev_addr[1]);
2185 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
2186 wb_write, 2);
2187
2188 /* tx control */
2189 val = 0xc0;
2190 if (bp->flow_ctrl & FLOW_CTRL_TX)
2191 val |= 0x800000;
2192 wb_write[0] = val;
2193 wb_write[1] = 0;
2194 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
2195
2196 /* set tx mtu */
2197 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
2198 wb_write[1] = 0;
2199 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
2200
2201 /* mac control */
2202 val = 0x3;
2203 if (is_lb) {
2204 val |= 0x4;
2205 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2206 }
2207 wb_write[0] = val;
2208 wb_write[1] = 0;
2209 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2210 wb_write, 2);
2211
2212 /* rx control set to don't strip crc */
2213 val = 0x14;
2214 if (bp->flow_ctrl & FLOW_CTRL_RX)
2215 val |= 0x20;
2216 wb_write[0] = val;
2217 wb_write[1] = 0;
2218 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
2219
2220 /* set rx mtu */
2221 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2222 wb_write[1] = 0;
2223 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
2224
2225 /* set cnt max size */
2226 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
2227 wb_write[1] = 0;
2228 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
2229 wb_write, 2);
2230
2231 /* configure safc */
2232 wb_write[0] = 0x1000200;
2233 wb_write[1] = 0;
2234 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2235 wb_write, 2);
2236
2237 /* fix for emulation */
2238 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2239 wb_write[0] = 0xf000;
2240 wb_write[1] = 0;
2241 REG_WR_DMAE(bp,
2242 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
2243 wb_write, 2);
2244 }
2245
2246 /* reset old bmac stats */
2247 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2248
2249 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
2250
2251 /* select XGXS */
2252 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2253 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2254
2255 /* disable the NIG in/out to the emac */
2256 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2257 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2258 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2259
2260 /* enable the NIG in/out to the bmac */
2261 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2262
2263 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2264 val = 0;
2265 if (bp->flow_ctrl & FLOW_CTRL_TX)
2266 val = 1;
2267 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2268 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2269
2270 bp->phy_flags |= PHY_BMAC_FLAG;
2271
2272 bp->stats_state = STATS_STATE_ENABLE;
2273}
2274
f1410647
ET
2275static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2276{
2277 int port = bp->port;
2278 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2279 NIG_REG_INGRESS_BMAC0_MEM;
2280 u32 wb_write[2];
2281
2282 /* Only if the bmac is out of reset */
2283 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2284 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2285 /* Clear Rx Enable bit in BMAC_CONTROL register */
2286#ifdef BNX2X_DMAE_RD
2287 bnx2x_read_dmae(bp, bmac_addr +
2288 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2289 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2290 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2291#else
2292 wb_write[0] = REG_RD(bp,
2293 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2294 wb_write[1] = REG_RD(bp,
2295 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2296#endif
2297 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2298 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2299 wb_write, 2);
2300 msleep(1);
2301 }
2302}
2303
a2fbb9ea
ET
2304static void bnx2x_emac_enable(struct bnx2x *bp)
2305{
2306 int port = bp->port;
2307 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2308 u32 val;
2309 int timeout;
2310
c14423fe 2311 DP(NETIF_MSG_LINK, "enabling EMAC\n");
a2fbb9ea
ET
2312 /* reset and unreset the emac core */
2313 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2314 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2315 msleep(5);
2316 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2317 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2318
2319 /* enable emac and not bmac */
2320 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2321
2322 /* for paladium */
2323 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2324 /* Use lane 1 (of lanes 0-3) */
2325 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2326 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2327 }
2328 /* for fpga */
2329 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2330 /* Use lane 1 (of lanes 0-3) */
2331 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2332 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2333 }
2334 /* ASIC */
2335 else {
2336 if (bp->phy_flags & PHY_XGXS_FLAG) {
2337 DP(NETIF_MSG_LINK, "XGXS\n");
2338 /* select the master lanes (out of 0-3) */
2339 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2340 bp->ser_lane);
2341 /* select XGXS */
2342 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2343
2344 } else { /* SerDes */
2345 DP(NETIF_MSG_LINK, "SerDes\n");
2346 /* select SerDes */
2347 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2348 }
2349 }
2350
2351 /* enable emac */
2352 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2353
2354 /* init emac - use read-modify-write */
2355 /* self clear reset */
2356 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2357 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2358
2359 timeout = 200;
2360 while (val & EMAC_MODE_RESET) {
2361 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2362 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2363 if (!timeout) {
2364 BNX2X_ERR("EMAC timeout!\n");
2365 break;
2366 }
2367 timeout--;
2368 }
2369
2370 /* reset tx part */
2371 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2372
2373 timeout = 200;
2374 while (val & EMAC_TX_MODE_RESET) {
2375 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2376 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2377 if (!timeout) {
2378 BNX2X_ERR("EMAC timeout!\n");
2379 break;
2380 }
2381 timeout--;
2382 }
2383
2384 if (CHIP_REV_IS_SLOW(bp)) {
2385 /* config GMII mode */
2386 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2387 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2388
2389 } else { /* ASIC */
2390 /* pause enable/disable */
2391 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2392 EMAC_RX_MODE_FLOW_EN);
2393 if (bp->flow_ctrl & FLOW_CTRL_RX)
2394 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2395 EMAC_RX_MODE_FLOW_EN);
2396
2397 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2398 EMAC_TX_MODE_EXT_PAUSE_EN);
2399 if (bp->flow_ctrl & FLOW_CTRL_TX)
2400 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2401 EMAC_TX_MODE_EXT_PAUSE_EN);
2402 }
2403
c14423fe 2404 /* KEEP_VLAN_TAG, promiscuous */
a2fbb9ea
ET
2405 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2406 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2407 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2408
2409 /* identify magic packets */
2410 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2411 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2412
2413 /* enable emac for jumbo packets */
2414 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2415 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2416 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2417
2418 /* strip CRC */
2419 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2420
2421 val = ((bp->dev->dev_addr[0] << 8) |
2422 bp->dev->dev_addr[1]);
2423 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2424
2425 val = ((bp->dev->dev_addr[2] << 24) |
2426 (bp->dev->dev_addr[3] << 16) |
2427 (bp->dev->dev_addr[4] << 8) |
2428 bp->dev->dev_addr[5]);
2429 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2430
2431 /* disable the NIG in/out to the bmac */
2432 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2433 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2434 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2435
2436 /* enable the NIG in/out to the emac */
2437 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2438 val = 0;
2439 if (bp->flow_ctrl & FLOW_CTRL_TX)
2440 val = 1;
2441 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2442 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2443
2444 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2445 /* take the BigMac out of reset */
2446 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2447 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2448
2449 /* enable access for bmac registers */
2450 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2451 }
2452
2453 bp->phy_flags |= PHY_EMAC_FLAG;
2454
2455 bp->stats_state = STATS_STATE_ENABLE;
2456}
2457
2458static void bnx2x_emac_program(struct bnx2x *bp)
2459{
2460 u16 mode = 0;
2461 int port = bp->port;
2462
2463 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2464 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2465 (EMAC_MODE_25G_MODE |
2466 EMAC_MODE_PORT_MII_10M |
2467 EMAC_MODE_HALF_DUPLEX));
2468 switch (bp->line_speed) {
2469 case SPEED_10:
2470 mode |= EMAC_MODE_PORT_MII_10M;
2471 break;
2472
2473 case SPEED_100:
2474 mode |= EMAC_MODE_PORT_MII;
2475 break;
2476
2477 case SPEED_1000:
2478 mode |= EMAC_MODE_PORT_GMII;
2479 break;
2480
2481 case SPEED_2500:
2482 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2483 break;
2484
2485 default:
2486 /* 10G not valid for EMAC */
2487 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2488 break;
2489 }
2490
2491 if (bp->duplex == DUPLEX_HALF)
2492 mode |= EMAC_MODE_HALF_DUPLEX;
2493 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2494 mode);
2495
2496 bnx2x_leds_set(bp, bp->line_speed);
2497}
2498
2499static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2500{
2501 u32 lp_up2;
2502 u32 tx_driver;
2503
2504 /* read precomp */
2505 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2506 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2507
2508 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2509 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2510
2511 /* bits [10:7] at lp_up2, positioned at [15:12] */
2512 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2513 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2514 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2515
2516 if ((lp_up2 != 0) &&
2517 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2518 /* replace tx_driver bits [15:12] */
2519 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2520 tx_driver |= lp_up2;
2521 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2522 }
2523}
2524
2525static void bnx2x_pbf_update(struct bnx2x *bp)
2526{
2527 int port = bp->port;
2528 u32 init_crd, crd;
2529 u32 count = 1000;
2530 u32 pause = 0;
2531
a2fbb9ea
ET
2532 /* disable port */
2533 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2534
2535 /* wait for init credit */
2536 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2537 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2538 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2539
2540 while ((init_crd != crd) && count) {
2541 msleep(5);
2542
2543 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2544 count--;
2545 }
2546 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2547 if (init_crd != crd)
2548 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2549
2550 if (bp->flow_ctrl & FLOW_CTRL_RX)
2551 pause = 1;
2552 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2553 if (pause) {
2554 /* update threshold */
2555 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2556 /* update init credit */
2557 init_crd = 778; /* (800-18-4) */
2558
2559 } else {
2560 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2561
2562 /* update threshold */
2563 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2564 /* update init credit */
2565 switch (bp->line_speed) {
2566 case SPEED_10:
2567 case SPEED_100:
2568 case SPEED_1000:
2569 init_crd = thresh + 55 - 22;
2570 break;
2571
2572 case SPEED_2500:
2573 init_crd = thresh + 138 - 22;
2574 break;
2575
2576 case SPEED_10000:
2577 init_crd = thresh + 553 - 22;
2578 break;
2579
2580 default:
2581 BNX2X_ERR("Invalid line_speed 0x%x\n",
2582 bp->line_speed);
2583 break;
2584 }
2585 }
2586 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2587 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2588 bp->line_speed, init_crd);
2589
2590 /* probe the credit changes */
2591 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2592 msleep(5);
2593 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2594
2595 /* enable port */
2596 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2597}
2598
2599static void bnx2x_update_mng(struct bnx2x *bp)
2600{
2601 if (!nomcp)
f1410647 2602 SHMEM_WR(bp, port_mb[bp->port].link_status,
a2fbb9ea
ET
2603 bp->link_status);
2604}
2605
2606static void bnx2x_link_report(struct bnx2x *bp)
2607{
2608 if (bp->link_up) {
2609 netif_carrier_on(bp->dev);
2610 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2611
2612 printk("%d Mbps ", bp->line_speed);
2613
2614 if (bp->duplex == DUPLEX_FULL)
2615 printk("full duplex");
2616 else
2617 printk("half duplex");
2618
2619 if (bp->flow_ctrl) {
2620 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2621 printk(", receive ");
2622 if (bp->flow_ctrl & FLOW_CTRL_TX)
2623 printk("& transmit ");
2624 } else {
2625 printk(", transmit ");
2626 }
2627 printk("flow control ON");
2628 }
2629 printk("\n");
2630
2631 } else { /* link_down */
2632 netif_carrier_off(bp->dev);
2633 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2634 }
2635}
2636
2637static void bnx2x_link_up(struct bnx2x *bp)
2638{
2639 int port = bp->port;
2640
2641 /* PBF - link up */
2642 bnx2x_pbf_update(bp);
2643
2644 /* disable drain */
2645 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2646
2647 /* update shared memory */
2648 bnx2x_update_mng(bp);
2649
2650 /* indicate link up */
2651 bnx2x_link_report(bp);
2652}
2653
2654static void bnx2x_link_down(struct bnx2x *bp)
2655{
2656 int port = bp->port;
2657
2658 /* notify stats */
2659 if (bp->stats_state != STATS_STATE_DISABLE) {
2660 bp->stats_state = STATS_STATE_STOP;
2661 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2662 }
2663
f1410647 2664 /* indicate no mac active */
a2fbb9ea
ET
2665 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2666
f1410647
ET
2667 /* update shared memory */
2668 bnx2x_update_mng(bp);
a2fbb9ea 2669
a2fbb9ea
ET
2670 /* activate nig drain */
2671 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2672
f1410647
ET
2673 /* reset BigMac */
2674 bnx2x_bmac_rx_disable(bp);
2675 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2676 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
a2fbb9ea
ET
2677
2678 /* indicate link down */
2679 bnx2x_link_report(bp);
2680}
2681
2682static void bnx2x_init_mac_stats(struct bnx2x *bp);
2683
2684/* This function is called upon link interrupt */
2685static void bnx2x_link_update(struct bnx2x *bp)
2686{
a2fbb9ea
ET
2687 int port = bp->port;
2688 int i;
f1410647 2689 u32 gp_status;
a2fbb9ea
ET
2690 int link_10g;
2691
f1410647 2692 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
a2fbb9ea 2693 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
f1410647
ET
2694 " 10G %x, XGXS_LINK %x\n", port,
2695 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
a2fbb9ea
ET
2696 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2697 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2698 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2699 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2700 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2701 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2702 );
2703
2704 might_sleep();
2705 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2706 /* avoid fast toggling */
f1410647 2707 for (i = 0; i < 10; i++) {
a2fbb9ea
ET
2708 msleep(10);
2709 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2710 &gp_status);
2711 }
2712
2713 bnx2x_link_settings_status(bp, gp_status);
2714
2715 /* anything 10 and over uses the bmac */
2716 link_10g = ((bp->line_speed >= SPEED_10000) &&
2717 (bp->line_speed <= SPEED_16000));
2718
2719 bnx2x_link_int_ack(bp, link_10g);
2720
2721 /* link is up only if both local phy and external phy are up */
f1410647
ET
2722 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2723 if (bp->link_up) {
a2fbb9ea
ET
2724 if (link_10g) {
2725 bnx2x_bmac_enable(bp, 0);
2726 bnx2x_leds_set(bp, SPEED_10000);
2727
2728 } else {
2729 bnx2x_emac_enable(bp);
2730 bnx2x_emac_program(bp);
2731
2732 /* AN complete? */
2733 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2734 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2735 bnx2x_set_sgmii_tx_driver(bp);
2736 }
2737 }
2738 bnx2x_link_up(bp);
2739
2740 } else { /* link down */
2741 bnx2x_leds_unset(bp);
2742 bnx2x_link_down(bp);
2743 }
2744
2745 bnx2x_init_mac_stats(bp);
2746}
2747
2748/*
2749 * Init service functions
2750 */
2751
2752static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2753{
2754 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2755 (bp->phy_addr + bp->ser_lane) : 0;
2756
2757 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2758 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2759}
2760
2761static void bnx2x_set_master_ln(struct bnx2x *bp)
2762{
2763 u32 new_master_ln;
2764
2765 /* set the master_ln for AN */
2766 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2767 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2768 &new_master_ln);
2769 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2770 (new_master_ln | bp->ser_lane));
2771}
2772
2773static void bnx2x_reset_unicore(struct bnx2x *bp)
2774{
2775 u32 mii_control;
2776 int i;
2777
2778 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2779 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2780 /* reset the unicore */
2781 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2782 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2783
2784 /* wait for the reset to self clear */
2785 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2786 udelay(5);
2787
2788 /* the reset erased the previous bank value */
2789 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2790 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2791 &mii_control);
2792
2793 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2794 udelay(5);
2795 return;
2796 }
2797 }
2798
f1410647
ET
2799 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2800 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2801 bp->phy_addr);
a2fbb9ea
ET
2802}
2803
2804static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2805{
2806 /* Each two bits represents a lane number:
2807 No swap is 0123 => 0x1b no need to enable the swap */
2808
2809 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2810 if (bp->rx_lane_swap != 0x1b) {
2811 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2812 (bp->rx_lane_swap |
2813 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2814 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2815 } else {
2816 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2817 }
2818
2819 if (bp->tx_lane_swap != 0x1b) {
2820 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2821 (bp->tx_lane_swap |
2822 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2823 } else {
2824 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2825 }
2826}
2827
2828static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2829{
2830 u32 control2;
2831
2832 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2833 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2834 &control2);
2835
2836 if (bp->autoneg & AUTONEG_PARALLEL) {
2837 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2838 } else {
2839 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2840 }
2841 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2842 control2);
2843
2844 if (bp->phy_flags & PHY_XGXS_FLAG) {
2845 DP(NETIF_MSG_LINK, "XGXS\n");
2846 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2847
2848 bnx2x_mdio22_write(bp,
f1410647 2849 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
a2fbb9ea
ET
2850 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2851
2852 bnx2x_mdio22_read(bp,
f1410647
ET
2853 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2854 &control2);
a2fbb9ea
ET
2855
2856 if (bp->autoneg & AUTONEG_PARALLEL) {
2857 control2 |=
2858 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2859 } else {
2860 control2 &=
2861 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2862 }
2863 bnx2x_mdio22_write(bp,
f1410647
ET
2864 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2865 control2);
2866
2867 /* Disable parallel detection of HiG */
2868 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2869 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2870 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2871 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
a2fbb9ea
ET
2872 }
2873}
2874
2875static void bnx2x_set_autoneg(struct bnx2x *bp)
2876{
2877 u32 reg_val;
2878
2879 /* CL37 Autoneg */
2880 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2881 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2882 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2883 (bp->autoneg & AUTONEG_CL37)) {
2884 /* CL37 Autoneg Enabled */
2885 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2886 } else {
2887 /* CL37 Autoneg Disabled */
2888 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2889 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2890 }
2891 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2892
2893 /* Enable/Disable Autodetection */
2894 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2895 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2896 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2897
2898 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2899 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2900 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2901 } else {
2902 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2903 }
2904 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2905
2906 /* Enable TetonII and BAM autoneg */
2907 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2908 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2909 &reg_val);
2910 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2911 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2912 /* Enable BAM aneg Mode and TetonII aneg Mode */
2913 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2914 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2915 } else {
2916 /* TetonII and BAM Autoneg Disabled */
2917 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2918 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2919 }
2920 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2921 reg_val);
2922
2923 /* Enable Clause 73 Aneg */
2924 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2925 (bp->autoneg & AUTONEG_CL73)) {
2926 /* Enable BAM Station Manager */
2927 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2928 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2929 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2930 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2931 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2932
2933 /* Merge CL73 and CL37 aneg resolution */
2934 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2935 &reg_val);
2936 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2937 (reg_val |
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2939
2940 /* Set the CL73 AN speed */
2941 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2942 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2943 /* In the SerDes we support only the 1G.
2944 In the XGXS we support the 10G KX4
2945 but we currently do not support the KR */
2946 if (bp->phy_flags & PHY_XGXS_FLAG) {
2947 DP(NETIF_MSG_LINK, "XGXS\n");
2948 /* 10G KX4 */
2949 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2950 } else {
2951 DP(NETIF_MSG_LINK, "SerDes\n");
2952 /* 1000M KX */
2953 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2954 }
2955 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2956
2957 /* CL73 Autoneg Enabled */
2958 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2959 } else {
2960 /* CL73 Autoneg Disabled */
2961 reg_val = 0;
2962 }
2963 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2964 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2965}
2966
2967/* program SerDes, forced speed */
2968static void bnx2x_program_serdes(struct bnx2x *bp)
2969{
2970 u32 reg_val;
2971
2972 /* program duplex, disable autoneg */
2973 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2974 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2975 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2976 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2977 if (bp->req_duplex == DUPLEX_FULL)
2978 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2979 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2980
2981 /* program speed
2982 - needed only if the speed is greater than 1G (2.5G or 10G) */
2983 if (bp->req_line_speed > SPEED_1000) {
2984 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2985 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2986 /* clearing the speed value before setting the right speed */
2987 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2988 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2989 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2990 if (bp->req_line_speed == SPEED_10000)
2991 reg_val |=
2992 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2993 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2994 }
2995}
2996
2997static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2998{
2999 u32 val = 0;
3000
3001 /* configure the 48 bits for BAM AN */
3002 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3003
3004 /* set extended capabilities */
f1410647 3005 if (bp->advertising & ADVERTISED_2500baseX_Full)
a2fbb9ea
ET
3006 val |= MDIO_OVER_1G_UP1_2_5G;
3007 if (bp->advertising & ADVERTISED_10000baseT_Full)
3008 val |= MDIO_OVER_1G_UP1_10G;
3009 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3010
3011 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3012}
3013
3014static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3015{
3016 u32 an_adv;
3017
3018 /* for AN, we are always publishing full duplex */
3019 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3020
f1410647
ET
3021 /* resolve pause mode and advertisement
3022 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3023 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3024 switch (bp->req_flow_ctrl) {
3025 case FLOW_CTRL_AUTO:
3026 if (bp->dev->mtu <= 4500) {
3027 an_adv |=
3028 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3029 bp->advertising |= (ADVERTISED_Pause |
3030 ADVERTISED_Asym_Pause);
3031 } else {
3032 an_adv |=
3033 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3034 bp->advertising |= ADVERTISED_Asym_Pause;
3035 }
3036 break;
3037
3038 case FLOW_CTRL_TX:
3039 an_adv |=
3040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3041 bp->advertising |= ADVERTISED_Asym_Pause;
3042 break;
3043
3044 case FLOW_CTRL_RX:
3045 if (bp->dev->mtu <= 4500) {
3046 an_adv |=
3047 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3048 bp->advertising |= (ADVERTISED_Pause |
3049 ADVERTISED_Asym_Pause);
3050 } else {
3051 an_adv |=
3052 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3053 bp->advertising &= ~(ADVERTISED_Pause |
3054 ADVERTISED_Asym_Pause);
3055 }
3056 break;
3057
3058 case FLOW_CTRL_BOTH:
3059 if (bp->dev->mtu <= 4500) {
3060 an_adv |=
3061 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3062 bp->advertising |= (ADVERTISED_Pause |
3063 ADVERTISED_Asym_Pause);
3064 } else {
3065 an_adv |=
3066 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3067 bp->advertising |= ADVERTISED_Asym_Pause;
3068 }
3069 break;
3070
3071 case FLOW_CTRL_NONE:
3072 default:
3073 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3074 bp->advertising &= ~(ADVERTISED_Pause |
3075 ADVERTISED_Asym_Pause);
3076 break;
3077 }
3078 } else { /* forced mode */
3079 switch (bp->req_flow_ctrl) {
3080 case FLOW_CTRL_AUTO:
3081 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3082 " req_autoneg 0x%x\n",
3083 bp->req_flow_ctrl, bp->req_autoneg);
3084 break;
3085
3086 case FLOW_CTRL_TX:
3087 an_adv |=
3088 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3089 bp->advertising |= ADVERTISED_Asym_Pause;
3090 break;
3091
3092 case FLOW_CTRL_RX:
3093 case FLOW_CTRL_BOTH:
3094 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3095 bp->advertising |= (ADVERTISED_Pause |
3096 ADVERTISED_Asym_Pause);
3097 break;
3098
3099 case FLOW_CTRL_NONE:
3100 default:
3101 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3102 bp->advertising &= ~(ADVERTISED_Pause |
3103 ADVERTISED_Asym_Pause);
3104 break;
3105 }
a2fbb9ea
ET
3106 }
3107
3108 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3109 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3110}
3111
3112static void bnx2x_restart_autoneg(struct bnx2x *bp)
3113{
3114 if (bp->autoneg & AUTONEG_CL73) {
3115 /* enable and restart clause 73 aneg */
3116 u32 an_ctrl;
3117
3118 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3119 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3120 &an_ctrl);
3121 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3122 (an_ctrl |
3123 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3124 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3125
3126 } else {
3127 /* Enable and restart BAM/CL37 aneg */
3128 u32 mii_control;
3129
3130 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3131 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3132 &mii_control);
3133 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3134 (mii_control |
3135 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3136 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3137 }
3138}
3139
3140static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3141{
3142 u32 control1;
3143
3144 /* in SGMII mode, the unicore is always slave */
3145 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3146 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3147 &control1);
3148 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3149 /* set sgmii mode (and not fiber) */
3150 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3151 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3152 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3153 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3154 control1);
3155
3156 /* if forced speed */
3157 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3158 /* set speed, disable autoneg */
3159 u32 mii_control;
3160
3161 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3162 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3163 &mii_control);
3164 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3165 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3166 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3167
3168 switch (bp->req_line_speed) {
3169 case SPEED_100:
3170 mii_control |=
3171 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3172 break;
3173 case SPEED_1000:
3174 mii_control |=
3175 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3176 break;
3177 case SPEED_10:
3178 /* there is nothing to set for 10M */
3179 break;
3180 default:
3181 /* invalid speed for SGMII */
3182 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3183 bp->req_line_speed);
3184 break;
3185 }
3186
3187 /* setting the full duplex */
3188 if (bp->req_duplex == DUPLEX_FULL)
3189 mii_control |=
3190 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3191 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3192 mii_control);
3193
3194 } else { /* AN mode */
3195 /* enable and restart AN */
3196 bnx2x_restart_autoneg(bp);
3197 }
3198}
3199
3200static void bnx2x_link_int_enable(struct bnx2x *bp)
3201{
3202 int port = bp->port;
f1410647
ET
3203 u32 ext_phy_type;
3204 u32 mask;
a2fbb9ea
ET
3205
3206 /* setting the status to report on link up
3207 for either XGXS or SerDes */
3208 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647
ET
3209 (NIG_STATUS_XGXS0_LINK10G |
3210 NIG_STATUS_XGXS0_LINK_STATUS |
3211 NIG_STATUS_SERDES0_LINK_STATUS));
a2fbb9ea
ET
3212
3213 if (bp->phy_flags & PHY_XGXS_FLAG) {
f1410647
ET
3214 mask = (NIG_MASK_XGXS0_LINK10G |
3215 NIG_MASK_XGXS0_LINK_STATUS);
3216 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3217 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3218 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3219 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3220 (ext_phy_type !=
3221 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3222 mask |= NIG_MASK_MI_INT;
3223 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3224 }
a2fbb9ea
ET
3225
3226 } else { /* SerDes */
f1410647
ET
3227 mask = NIG_MASK_SERDES0_LINK_STATUS;
3228 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3229 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3230 if ((ext_phy_type !=
3231 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3232 (ext_phy_type !=
3233 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3234 mask |= NIG_MASK_MI_INT;
3235 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3236 }
a2fbb9ea 3237 }
f1410647
ET
3238 bnx2x_bits_en(bp,
3239 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3240 mask);
3241 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3242 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3243 " 10G %x, XGXS_LINK %x\n", port,
3244 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
3245 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3246 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3247 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3248 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3249 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3250 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3251 );
3252}
3253
3254static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3255{
3256 u32 ext_phy_addr = ((bp->ext_phy_config &
3257 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3258 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3259 u32 fw_ver1, fw_ver2;
3260
3261 /* Need to wait 200ms after reset */
3262 msleep(200);
3263 /* Boot port from external ROM
3264 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3265 */
3266 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3267 EXT_PHY_KR_PMA_PMD_DEVAD,
3268 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3269
3270 /* Reset internal microprocessor */
3271 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3272 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3273 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3274 /* set micro reset = 0 */
3275 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3276 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3277 EXT_PHY_KR_ROM_MICRO_RESET);
3278 /* Reset internal microprocessor */
3279 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3280 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3281 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3282 /* wait for 100ms for code download via SPI port */
3283 msleep(100);
3284
3285 /* Clear ser_boot_ctl bit */
3286 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3287 EXT_PHY_KR_PMA_PMD_DEVAD,
3288 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3289 /* Wait 100ms */
3290 msleep(100);
3291
3292 /* Print the PHY FW version */
3293 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3294 EXT_PHY_KR_PMA_PMD_DEVAD,
3295 0xca19, &fw_ver1);
3296 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3297 EXT_PHY_KR_PMA_PMD_DEVAD,
3298 0xca1a, &fw_ver2);
3299 DP(NETIF_MSG_LINK,
3300 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3301}
3302
3303static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3304{
3305 u32 ext_phy_addr = ((bp->ext_phy_config &
3306 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3307 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3308
3309 /* Force KR or KX */
3310 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3311 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3312 0x2040);
3313 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3314 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3315 0x000b);
3316 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3317 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3318 0x0000);
3319 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3320 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3321 0x0000);
a2fbb9ea
ET
3322}
3323
3324static void bnx2x_ext_phy_init(struct bnx2x *bp)
3325{
a2fbb9ea
ET
3326 u32 ext_phy_type;
3327 u32 ext_phy_addr;
f1410647
ET
3328 u32 cnt;
3329 u32 ctrl;
3330 u32 val = 0;
a2fbb9ea
ET
3331
3332 if (bp->phy_flags & PHY_XGXS_FLAG) {
a2fbb9ea
ET
3333 ext_phy_addr = ((bp->ext_phy_config &
3334 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3335 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3336
3337 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
f1410647
ET
3338 /* Make sure that the soft reset is off (expect for the 8072:
3339 * due to the lock, it will be done inside the specific
3340 * handling)
3341 */
3342 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3343 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3344 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3345 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3346 /* Wait for soft reset to get cleared upto 1 sec */
3347 for (cnt = 0; cnt < 1000; cnt++) {
3348 bnx2x_mdio45_read(bp, ext_phy_addr,
3349 EXT_PHY_OPT_PMA_PMD_DEVAD,
3350 EXT_PHY_OPT_CNTL, &ctrl);
3351 if (!(ctrl & (1<<15)))
3352 break;
3353 msleep(1);
3354 }
3355 DP(NETIF_MSG_LINK,
3356 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3357 }
3358
a2fbb9ea
ET
3359 switch (ext_phy_type) {
3360 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3361 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3362 break;
3363
3364 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3365 DP(NETIF_MSG_LINK, "XGXS 8705\n");
a2fbb9ea 3366
f1410647
ET
3367 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3368 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea
ET
3369 EXT_PHY_OPT_PMD_MISC_CNTL,
3370 0x8288);
f1410647
ET
3371 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3372 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea
ET
3373 EXT_PHY_OPT_PHY_IDENTIFIER,
3374 0x7fbf);
f1410647
ET
3375 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3376 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea
ET
3377 EXT_PHY_OPT_CMU_PLL_BYPASS,
3378 0x0100);
f1410647
ET
3379 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3380 EXT_PHY_OPT_WIS_DEVAD,
a2fbb9ea
ET
3381 EXT_PHY_OPT_LASI_CNTL, 0x1);
3382 break;
3383
3384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3385 DP(NETIF_MSG_LINK, "XGXS 8706\n");
a2fbb9ea 3386
f1410647
ET
3387 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3388 /* Force speed */
3389 if (bp->req_line_speed == SPEED_10000) {
3390 DP(NETIF_MSG_LINK,
3391 "XGXS 8706 force 10Gbps\n");
3392 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3393 EXT_PHY_OPT_PMA_PMD_DEVAD,
3394 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3395 0x400);
3396 } else {
3397 /* Force 1Gbps */
3398 DP(NETIF_MSG_LINK,
3399 "XGXS 8706 force 1Gbps\n");
3400
3401 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3402 EXT_PHY_OPT_PMA_PMD_DEVAD,
3403 EXT_PHY_OPT_CNTL,
3404 0x0040);
3405
3406 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3407 EXT_PHY_OPT_PMA_PMD_DEVAD,
3408 EXT_PHY_OPT_CNTL2,
3409 0x000D);
3410 }
3411
3412 /* Enable LASI */
3413 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3414 EXT_PHY_OPT_PMA_PMD_DEVAD,
3415 EXT_PHY_OPT_LASI_CNTL,
3416 0x1);
3417 } else {
3418 /* AUTONEG */
3419 /* Allow CL37 through CL73 */
3420 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3421 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3422 EXT_PHY_AUTO_NEG_DEVAD,
3423 EXT_PHY_OPT_AN_CL37_CL73,
3424 0x040c);
3425
3426 /* Enable Full-Duplex advertisment on CL37 */
3427 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3428 EXT_PHY_AUTO_NEG_DEVAD,
3429 EXT_PHY_OPT_AN_CL37_FD,
3430 0x0020);
3431 /* Enable CL37 AN */
3432 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3433 EXT_PHY_AUTO_NEG_DEVAD,
3434 EXT_PHY_OPT_AN_CL37_AN,
3435 0x1000);
3436 /* Advertise 10G/1G support */
3437 if (bp->advertising &
3438 ADVERTISED_1000baseT_Full)
3439 val = (1<<5);
3440 if (bp->advertising &
3441 ADVERTISED_10000baseT_Full)
3442 val |= (1<<7);
3443
3444 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3445 EXT_PHY_AUTO_NEG_DEVAD,
3446 EXT_PHY_OPT_AN_ADV, val);
3447 /* Enable LASI */
3448 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3449 EXT_PHY_OPT_PMA_PMD_DEVAD,
3450 EXT_PHY_OPT_LASI_CNTL,
3451 0x1);
3452
3453 /* Enable clause 73 AN */
3454 bnx2x_mdio45_write(bp, ext_phy_addr,
3455 EXT_PHY_AUTO_NEG_DEVAD,
3456 EXT_PHY_OPT_CNTL,
3457 0x1200);
3458 }
3459 break;
3460
3461 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3462 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3463 /* Wait for soft reset to get cleared upto 1 sec */
3464 for (cnt = 0; cnt < 1000; cnt++) {
3465 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3466 ext_phy_addr,
3467 EXT_PHY_OPT_PMA_PMD_DEVAD,
3468 EXT_PHY_OPT_CNTL, &ctrl);
3469 if (!(ctrl & (1<<15)))
3470 break;
3471 msleep(1);
3472 }
3473 DP(NETIF_MSG_LINK,
3474 "8072 control reg 0x%x (after %d ms)\n",
3475 ctrl, cnt);
3476
3477 bnx2x_bcm8072_external_rom_boot(bp);
3478 DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n");
3479
3480 /* enable LASI */
3481 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3482 ext_phy_addr,
3483 EXT_PHY_KR_PMA_PMD_DEVAD,
3484 0x9000, 0x0400);
3485 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3486 ext_phy_addr,
3487 EXT_PHY_KR_PMA_PMD_DEVAD,
3488 EXT_PHY_KR_LASI_CNTL, 0x0004);
3489
3490 /* If this is forced speed, set to KR or KX
3491 * (all other are not supported)
3492 */
3493 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3494 if (bp->req_line_speed == SPEED_10000) {
3495 bnx2x_bcm8072_force_10G(bp);
3496 DP(NETIF_MSG_LINK,
3497 "Forced speed 10G on 8072\n");
3498 /* unlock */
3499 bnx2x_hw_unlock(bp,
3500 HW_LOCK_RESOURCE_8072_MDIO);
3501 break;
3502 } else
3503 val = (1<<5);
3504 } else {
3505
3506 /* Advertise 10G/1G support */
3507 if (bp->advertising &
3508 ADVERTISED_1000baseT_Full)
3509 val = (1<<5);
3510 if (bp->advertising &
3511 ADVERTISED_10000baseT_Full)
3512 val |= (1<<7);
3513 }
3514 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3515 ext_phy_addr,
3516 EXT_PHY_KR_AUTO_NEG_DEVAD,
3517 0x11, val);
3518 /* Add support for CL37 ( passive mode ) I */
3519 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3520 ext_phy_addr,
3521 EXT_PHY_KR_AUTO_NEG_DEVAD,
3522 0x8370, 0x040c);
3523 /* Add support for CL37 ( passive mode ) II */
3524 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3525 ext_phy_addr,
3526 EXT_PHY_KR_AUTO_NEG_DEVAD,
3527 0xffe4, 0x20);
3528 /* Add support for CL37 ( passive mode ) III */
3529 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3530 ext_phy_addr,
3531 EXT_PHY_KR_AUTO_NEG_DEVAD,
3532 0xffe0, 0x1000);
3533 /* Restart autoneg */
3534 msleep(500);
3535 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3536 ext_phy_addr,
3537 EXT_PHY_KR_AUTO_NEG_DEVAD,
3538 EXT_PHY_KR_CTRL, 0x1200);
3539 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3540 "1G %ssupported 10G %ssupported\n",
3541 (val & (1<<5)) ? "" : "not ",
3542 (val & (1<<7)) ? "" : "not ");
3543
3544 /* unlock */
3545 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3546 break;
3547
3548 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3549 DP(NETIF_MSG_LINK,
3550 "Setting the SFX7101 LASI indication\n");
3551 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3552 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3553 EXT_PHY_OPT_LASI_CNTL, 0x1);
f1410647
ET
3554 DP(NETIF_MSG_LINK,
3555 "Setting the SFX7101 LED to blink on traffic\n");
3556 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3557 EXT_PHY_OPT_PMA_PMD_DEVAD,
3558 0xC007, (1<<3));
3559
3560 /* read modify write pause advertizing */
3561 bnx2x_mdio45_read(bp, ext_phy_addr,
3562 EXT_PHY_KR_AUTO_NEG_DEVAD,
3563 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3564 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3565 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3566 if (bp->advertising & ADVERTISED_Pause)
3567 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3568
3569 if (bp->advertising & ADVERTISED_Asym_Pause) {
3570 val |=
3571 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3572 }
3573 DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val);
3574 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3575 EXT_PHY_KR_AUTO_NEG_DEVAD,
3576 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3577 /* Restart autoneg */
3578 bnx2x_mdio45_read(bp, ext_phy_addr,
3579 EXT_PHY_KR_AUTO_NEG_DEVAD,
3580 EXT_PHY_KR_CTRL, &val);
3581 val |= 0x200;
3582 bnx2x_mdio45_write(bp, ext_phy_addr,
3583 EXT_PHY_KR_AUTO_NEG_DEVAD,
3584 EXT_PHY_KR_CTRL, val);
a2fbb9ea
ET
3585 break;
3586
3587 default:
f1410647
ET
3588 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3589 bp->ext_phy_config);
a2fbb9ea
ET
3590 break;
3591 }
a2fbb9ea
ET
3592
3593 } else { /* SerDes */
f1410647 3594/* ext_phy_addr = ((bp->ext_phy_config &
a2fbb9ea
ET
3595 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3596 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3597*/
3598 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3599 switch (ext_phy_type) {
3600 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3601 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3602 break;
3603
3604 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3605 DP(NETIF_MSG_LINK, "SerDes 5482\n");
a2fbb9ea
ET
3606 break;
3607
3608 default:
3609 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3610 bp->ext_phy_config);
3611 break;
3612 }
3613 }
3614}
3615
3616static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3617{
3618 u32 ext_phy_type;
f1410647
ET
3619 u32 ext_phy_addr = ((bp->ext_phy_config &
3620 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3621 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3622 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3623
3624 /* The PHY reset is controled by GPIO 1
3625 * Give it 1ms of reset pulse
3626 */
3627 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3628 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3629 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3630 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3631 msleep(1);
3632 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3633 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3634 }
a2fbb9ea
ET
3635
3636 if (bp->phy_flags & PHY_XGXS_FLAG) {
3637 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3638 switch (ext_phy_type) {
3639 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3640 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3641 break;
3642
3643 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3644 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
3645 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3646 bnx2x_mdio45_write(bp, ext_phy_addr,
3647 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3648 EXT_PHY_OPT_CNTL, 0xa040);
f1410647
ET
3649 break;
3650
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3652 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3653 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3654 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3655 ext_phy_addr,
3656 EXT_PHY_KR_PMA_PMD_DEVAD,
3657 0, 1<<15);
3658 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3659 break;
3660
3661 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3662 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
a2fbb9ea
ET
3663 break;
3664
3665 default:
3666 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3667 bp->ext_phy_config);
3668 break;
3669 }
3670
3671 } else { /* SerDes */
3672 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3673 switch (ext_phy_type) {
3674 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3675 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3676 break;
3677
3678 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3679 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3680 break;
3681
3682 default:
3683 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3684 bp->ext_phy_config);
3685 break;
3686 }
3687 }
3688}
3689
3690static void bnx2x_link_initialize(struct bnx2x *bp)
3691{
3692 int port = bp->port;
3693
3694 /* disable attentions */
3695 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3696 (NIG_MASK_XGXS0_LINK_STATUS |
3697 NIG_MASK_XGXS0_LINK10G |
3698 NIG_MASK_SERDES0_LINK_STATUS |
3699 NIG_MASK_MI_INT));
3700
f1410647 3701 /* Activate the external PHY */
a2fbb9ea
ET
3702 bnx2x_ext_phy_reset(bp);
3703
3704 bnx2x_set_aer_mmd(bp);
3705
3706 if (bp->phy_flags & PHY_XGXS_FLAG)
3707 bnx2x_set_master_ln(bp);
3708
3709 /* reset the SerDes and wait for reset bit return low */
3710 bnx2x_reset_unicore(bp);
3711
3712 bnx2x_set_aer_mmd(bp);
3713
3714 /* setting the masterLn_def again after the reset */
3715 if (bp->phy_flags & PHY_XGXS_FLAG) {
3716 bnx2x_set_master_ln(bp);
3717 bnx2x_set_swap_lanes(bp);
3718 }
3719
3720 /* Set Parallel Detect */
3721 if (bp->req_autoneg & AUTONEG_SPEED)
3722 bnx2x_set_parallel_detection(bp);
3723
3724 if (bp->phy_flags & PHY_XGXS_FLAG) {
3725 if (bp->req_line_speed &&
3726 bp->req_line_speed < SPEED_1000) {
3727 bp->phy_flags |= PHY_SGMII_FLAG;
3728 } else {
3729 bp->phy_flags &= ~PHY_SGMII_FLAG;
3730 }
3731 }
3732
3733 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3734 u16 bank, rx_eq;
3735
3736 rx_eq = ((bp->serdes_config &
3737 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3738 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3739
3740 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3741 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3742 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3743 MDIO_SET_REG_BANK(bp, bank);
3744 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3745 ((rx_eq &
3746 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3747 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3748 }
3749
3750 /* forced speed requested? */
3751 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3752 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3753
3754 /* disable autoneg */
3755 bnx2x_set_autoneg(bp);
3756
3757 /* program speed and duplex */
3758 bnx2x_program_serdes(bp);
3759
3760 } else { /* AN_mode */
3761 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3762
3763 /* AN enabled */
3764 bnx2x_set_brcm_cl37_advertisment(bp);
3765
c14423fe 3766 /* program duplex & pause advertisement (for aneg) */
a2fbb9ea
ET
3767 bnx2x_set_ieee_aneg_advertisment(bp);
3768
3769 /* enable autoneg */
3770 bnx2x_set_autoneg(bp);
3771
c14423fe 3772 /* enable and restart AN */
a2fbb9ea
ET
3773 bnx2x_restart_autoneg(bp);
3774 }
3775
3776 } else { /* SGMII mode */
3777 DP(NETIF_MSG_LINK, "SGMII\n");
3778
3779 bnx2x_initialize_sgmii_process(bp);
3780 }
3781
a2fbb9ea
ET
3782 /* init ext phy and enable link state int */
3783 bnx2x_ext_phy_init(bp);
f1410647
ET
3784
3785 /* enable the interrupt */
3786 bnx2x_link_int_enable(bp);
a2fbb9ea
ET
3787}
3788
3789static void bnx2x_phy_deassert(struct bnx2x *bp)
3790{
3791 int port = bp->port;
3792 u32 val;
3793
3794 if (bp->phy_flags & PHY_XGXS_FLAG) {
3795 DP(NETIF_MSG_LINK, "XGXS\n");
3796 val = XGXS_RESET_BITS;
3797
3798 } else { /* SerDes */
3799 DP(NETIF_MSG_LINK, "SerDes\n");
3800 val = SERDES_RESET_BITS;
3801 }
3802
3803 val = val << (port*16);
3804
3805 /* reset and unreset the SerDes/XGXS */
3806 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3807 msleep(5);
3808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3809}
3810
3811static int bnx2x_phy_init(struct bnx2x *bp)
3812{
3813 DP(NETIF_MSG_LINK, "started\n");
3814 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3815 bp->phy_flags |= PHY_EMAC_FLAG;
3816 bp->link_up = 1;
3817 bp->line_speed = SPEED_10000;
3818 bp->duplex = DUPLEX_FULL;
3819 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3820 bnx2x_emac_enable(bp);
3821 bnx2x_link_report(bp);
3822 return 0;
3823
3824 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3825 bp->phy_flags |= PHY_BMAC_FLAG;
3826 bp->link_up = 1;
3827 bp->line_speed = SPEED_10000;
3828 bp->duplex = DUPLEX_FULL;
3829 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3830 bnx2x_bmac_enable(bp, 0);
3831 bnx2x_link_report(bp);
3832 return 0;
3833
3834 } else {
3835 bnx2x_phy_deassert(bp);
3836 bnx2x_link_initialize(bp);
3837 }
3838
3839 return 0;
3840}
3841
3842static void bnx2x_link_reset(struct bnx2x *bp)
3843{
3844 int port = bp->port;
f1410647
ET
3845 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3846
3847 /* update shared memory */
3848 bp->link_status = 0;
3849 bnx2x_update_mng(bp);
a2fbb9ea
ET
3850
3851 /* disable attentions */
3852 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3853 (NIG_MASK_XGXS0_LINK_STATUS |
3854 NIG_MASK_XGXS0_LINK10G |
3855 NIG_MASK_SERDES0_LINK_STATUS |
3856 NIG_MASK_MI_INT));
3857
f1410647
ET
3858 /* activate nig drain */
3859 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3860
3861 /* disable nig egress interface */
3862 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3863 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3864
3865 /* Stop BigMac rx */
3866 bnx2x_bmac_rx_disable(bp);
3867
3868 /* disable emac */
3869 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3870
3871 msleep(10);
3872
3873 /* The PHY reset is controled by GPIO 1
3874 * Hold it as output low
3875 */
3876 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3877 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3878 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3879 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3880 DP(NETIF_MSG_LINK, "reset external PHY\n");
3881 }
a2fbb9ea
ET
3882
3883 /* reset the SerDes/XGXS */
3884 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3885 (0x1ff << (port*16)));
3886
f1410647
ET
3887 /* reset BigMac */
3888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3889 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
a2fbb9ea 3890
f1410647
ET
3891 /* disable nig ingress interface */
3892 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
a2fbb9ea 3893 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
a2fbb9ea 3894
f1410647
ET
3895 /* set link down */
3896 bp->link_up = 0;
a2fbb9ea
ET
3897}
3898
3899#ifdef BNX2X_XGXS_LB
3900static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3901{
3902 int port = bp->port;
3903
3904 if (is_10g) {
3905 u32 md_devad;
3906
3907 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3908
3909 /* change the uni_phy_addr in the nig */
3910 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3911 &md_devad);
3912 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3913
3914 /* change the aer mmd */
3915 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3916 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3917
3918 /* config combo IEEE0 control reg for loopback */
3919 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3920 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3921 0x6041);
3922
3923 /* set aer mmd back */
3924 bnx2x_set_aer_mmd(bp);
3925
3926 /* and md_devad */
3927 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3928
3929 } else {
3930 u32 mii_control;
3931
3932 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3933
3934 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3935 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3936 &mii_control);
3937 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3938 (mii_control |
3939 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3940 }
3941}
3942#endif
3943
3944/* end of PHY/MAC */
3945
3946/* slow path */
3947
3948/*
3949 * General service functions
3950 */
3951
3952/* the slow path queue is odd since completions arrive on the fastpath ring */
3953static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3954 u32 data_hi, u32 data_lo, int common)
3955{
3956 int port = bp->port;
3957
3958 DP(NETIF_MSG_TIMER,
c14423fe 3959 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
3960 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3961 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3962 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3963
3964#ifdef BNX2X_STOP_ON_ERROR
3965 if (unlikely(bp->panic))
3966 return -EIO;
3967#endif
3968
3969 spin_lock(&bp->spq_lock);
3970
3971 if (!bp->spq_left) {
3972 BNX2X_ERR("BUG! SPQ ring full!\n");
3973 spin_unlock(&bp->spq_lock);
3974 bnx2x_panic();
3975 return -EBUSY;
3976 }
f1410647 3977
a2fbb9ea
ET
3978 /* CID needs port number to be encoded int it */
3979 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3980 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3981 HW_CID(bp, cid)));
3982 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3983 if (common)
3984 bp->spq_prod_bd->hdr.type |=
3985 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3986
3987 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3988 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3989
3990 bp->spq_left--;
3991
3992 if (bp->spq_prod_bd == bp->spq_last_bd) {
3993 bp->spq_prod_bd = bp->spq;
3994 bp->spq_prod_idx = 0;
3995 DP(NETIF_MSG_TIMER, "end of spq\n");
3996
3997 } else {
3998 bp->spq_prod_bd++;
3999 bp->spq_prod_idx++;
4000 }
4001
4002 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4003 bp->spq_prod_idx);
4004
4005 spin_unlock(&bp->spq_lock);
4006 return 0;
4007}
4008
4009/* acquire split MCP access lock register */
4010static int bnx2x_lock_alr(struct bnx2x *bp)
4011{
4012 int rc = 0;
4013 u32 i, j, val;
4014
4015 might_sleep();
4016 i = 100;
4017 for (j = 0; j < i*10; j++) {
4018 val = (1UL << 31);
4019 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4020 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4021 if (val & (1L << 31))
4022 break;
4023
4024 msleep(5);
4025 }
4026
4027 if (!(val & (1L << 31))) {
4028 BNX2X_ERR("Cannot acquire nvram interface\n");
4029
4030 rc = -EBUSY;
4031 }
4032
4033 return rc;
4034}
4035
4036/* Release split MCP access lock register */
4037static void bnx2x_unlock_alr(struct bnx2x *bp)
4038{
4039 u32 val = 0;
4040
4041 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4042}
4043
4044static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4045{
4046 struct host_def_status_block *def_sb = bp->def_status_blk;
4047 u16 rc = 0;
4048
4049 barrier(); /* status block is written to by the chip */
4050
4051 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4052 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4053 rc |= 1;
4054 }
4055 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4056 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4057 rc |= 2;
4058 }
4059 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4060 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4061 rc |= 4;
4062 }
4063 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4064 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4065 rc |= 8;
4066 }
4067 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4068 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4069 rc |= 16;
4070 }
4071 return rc;
4072}
4073
4074/*
4075 * slow path service functions
4076 */
4077
4078static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4079{
4080 int port = bp->port;
4081 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4082 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4083 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4084 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4085 NIG_REG_MASK_INTERRUPT_PORT0;
4086
4087 if (~bp->aeu_mask & (asserted & 0xff))
4088 BNX2X_ERR("IGU ERROR\n");
4089 if (bp->attn_state & asserted)
4090 BNX2X_ERR("IGU ERROR\n");
4091
4092 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4093 bp->aeu_mask, asserted);
4094 bp->aeu_mask &= ~(asserted & 0xff);
4095 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4096
4097 REG_WR(bp, aeu_addr, bp->aeu_mask);
4098
4099 bp->attn_state |= asserted;
4100
4101 if (asserted & ATTN_HARD_WIRED_MASK) {
4102 if (asserted & ATTN_NIG_FOR_FUNC) {
4103 u32 nig_status_port;
4104 u32 nig_int_addr = port ?
4105 NIG_REG_STATUS_INTERRUPT_PORT1 :
4106 NIG_REG_STATUS_INTERRUPT_PORT0;
4107
4108 bp->nig_mask = REG_RD(bp, nig_mask_addr);
4109 REG_WR(bp, nig_mask_addr, 0);
4110
4111 nig_status_port = REG_RD(bp, nig_int_addr);
4112 bnx2x_link_update(bp);
4113
4114 /* handle unicore attn? */
4115 }
4116 if (asserted & ATTN_SW_TIMER_4_FUNC)
4117 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4118
4119 if (asserted & GPIO_2_FUNC)
4120 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4121
4122 if (asserted & GPIO_3_FUNC)
4123 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4124
4125 if (asserted & GPIO_4_FUNC)
4126 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4127
4128 if (port == 0) {
4129 if (asserted & ATTN_GENERAL_ATTN_1) {
4130 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4132 }
4133 if (asserted & ATTN_GENERAL_ATTN_2) {
4134 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4136 }
4137 if (asserted & ATTN_GENERAL_ATTN_3) {
4138 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4140 }
4141 } else {
4142 if (asserted & ATTN_GENERAL_ATTN_4) {
4143 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4144 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4145 }
4146 if (asserted & ATTN_GENERAL_ATTN_5) {
4147 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4148 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4149 }
4150 if (asserted & ATTN_GENERAL_ATTN_6) {
4151 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4152 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4153 }
4154 }
4155
4156 } /* if hardwired */
4157
4158 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4159 asserted, BAR_IGU_INTMEM + igu_addr);
4160 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4161
4162 /* now set back the mask */
4163 if (asserted & ATTN_NIG_FOR_FUNC)
4164 REG_WR(bp, nig_mask_addr, bp->nig_mask);
4165}
4166
4167static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4168{
4169 int port = bp->port;
4170 int index;
4171 struct attn_route attn;
4172 struct attn_route group_mask;
4173 u32 reg_addr;
4174 u32 val;
4175
4176 /* need to take HW lock because MCP or other port might also
4177 try to handle this event */
4178 bnx2x_lock_alr(bp);
4179
4180 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4181 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4182 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4183 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4184 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4185
4186 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4187 if (deasserted & (1 << index)) {
4188 group_mask = bp->attn_group[index];
4189
4190 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4191 (unsigned long long)group_mask.sig[0]);
4192
4193 if (attn.sig[3] & group_mask.sig[3] &
4194 EVEREST_GEN_ATTN_IN_USE_MASK) {
4195
4196 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4197
4198 BNX2X_ERR("MC assert!\n");
4199 bnx2x_panic();
4200
4201 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4202
4203 BNX2X_ERR("MCP assert!\n");
4204 REG_WR(bp,
4205 MISC_REG_AEU_GENERAL_ATTN_11, 0);
4206 bnx2x_mc_assert(bp);
4207
4208 } else {
4209 BNX2X_ERR("UNKOWEN HW ASSERT!\n");
4210 }
4211 }
4212
4213 if (attn.sig[1] & group_mask.sig[1] &
4214 BNX2X_DOORQ_ASSERT) {
4215
4216 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4217 BNX2X_ERR("DB hw attention 0x%x\n", val);
4218 /* DORQ discard attention */
4219 if (val & 0x2)
4220 BNX2X_ERR("FATAL error from DORQ\n");
4221 }
4222
4223 if (attn.sig[2] & group_mask.sig[2] &
4224 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4225
4226 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4227 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4228 /* CFC error attention */
4229 if (val & 0x2)
4230 BNX2X_ERR("FATAL error from CFC\n");
4231 }
4232
4233 if (attn.sig[2] & group_mask.sig[2] &
4234 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4235
4236 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4237 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4238 /* RQ_USDMDP_FIFO_OVERFLOW */
4239 if (val & 0x18000)
4240 BNX2X_ERR("FATAL error from PXP\n");
4241 }
4242
4243 if (attn.sig[3] & group_mask.sig[3] &
4244 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4245
4246 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4247 0x7ff);
4248 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4249 attn.sig[3]);
4250 }
4251
4252 if ((attn.sig[0] & group_mask.sig[0] &
4253 HW_INTERRUT_ASSERT_SET_0) ||
4254 (attn.sig[1] & group_mask.sig[1] &
4255 HW_INTERRUT_ASSERT_SET_1) ||
4256 (attn.sig[2] & group_mask.sig[2] &
4257 HW_INTERRUT_ASSERT_SET_2))
4258 BNX2X_ERR("FATAL HW block attention\n");
4259
4260 if ((attn.sig[0] & group_mask.sig[0] &
4261 HW_PRTY_ASSERT_SET_0) ||
4262 (attn.sig[1] & group_mask.sig[1] &
4263 HW_PRTY_ASSERT_SET_1) ||
4264 (attn.sig[2] & group_mask.sig[2] &
4265 HW_PRTY_ASSERT_SET_2))
c14423fe 4266 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
4267 }
4268 }
4269
4270 bnx2x_unlock_alr(bp);
4271
4272 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4273
4274 val = ~deasserted;
4275/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4276 val, BAR_IGU_INTMEM + reg_addr); */
4277 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4278
4279 if (bp->aeu_mask & (deasserted & 0xff))
4280 BNX2X_ERR("IGU BUG\n");
4281 if (~bp->attn_state & deasserted)
4282 BNX2X_ERR("IGU BUG\n");
4283
4284 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4285 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4286
4287 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4288 bp->aeu_mask |= (deasserted & 0xff);
4289
4290 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4291 REG_WR(bp, reg_addr, bp->aeu_mask);
4292
4293 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4294 bp->attn_state &= ~deasserted;
4295 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4296}
4297
4298static void bnx2x_attn_int(struct bnx2x *bp)
4299{
4300 /* read local copy of bits */
4301 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4302 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4303 u32 attn_state = bp->attn_state;
4304
4305 /* look for changed bits */
4306 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4307 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4308
4309 DP(NETIF_MSG_HW,
4310 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4311 attn_bits, attn_ack, asserted, deasserted);
4312
4313 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4314 BNX2X_ERR("bad attention state\n");
4315
4316 /* handle bits that were raised */
4317 if (asserted)
4318 bnx2x_attn_int_asserted(bp, asserted);
4319
4320 if (deasserted)
4321 bnx2x_attn_int_deasserted(bp, deasserted);
4322}
4323
4324static void bnx2x_sp_task(struct work_struct *work)
4325{
4326 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4327 u16 status;
4328
4329 /* Return here if interrupt is disabled */
4330 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4331 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4332 return;
4333 }
4334
4335 status = bnx2x_update_dsb_idx(bp);
4336 if (status == 0)
4337 BNX2X_ERR("spurious slowpath interrupt!\n");
4338
4339 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4340
4341 if (status & 0x1) {
4342 /* HW attentions */
4343 bnx2x_attn_int(bp);
4344 }
4345
4346 /* CStorm events: query_stats, cfc delete ramrods */
4347 if (status & 0x2)
4348 bp->stat_pending = 0;
4349
4350 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4351 IGU_INT_NOP, 1);
4352 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4353 IGU_INT_NOP, 1);
4354 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4355 IGU_INT_NOP, 1);
4356 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4357 IGU_INT_NOP, 1);
4358 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4359 IGU_INT_ENABLE, 1);
4360}
4361
4362static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4363{
4364 struct net_device *dev = dev_instance;
4365 struct bnx2x *bp = netdev_priv(dev);
4366
4367 /* Return here if interrupt is disabled */
4368 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4369 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4370 return IRQ_HANDLED;
4371 }
4372
4373 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4374
4375#ifdef BNX2X_STOP_ON_ERROR
4376 if (unlikely(bp->panic))
4377 return IRQ_HANDLED;
4378#endif
4379
4380 schedule_work(&bp->sp_task);
4381
4382 return IRQ_HANDLED;
4383}
4384
4385/* end of slow path */
4386
4387/* Statistics */
4388
4389/****************************************************************************
4390* Macros
4391****************************************************************************/
4392
4393#define UPDATE_STAT(s, t) \
4394 do { \
4395 estats->t += new->s - old->s; \
4396 old->s = new->s; \
4397 } while (0)
4398
4399/* sum[hi:lo] += add[hi:lo] */
4400#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4401 do { \
4402 s_lo += a_lo; \
4403 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
4404 } while (0)
4405
4406/* difference = minuend - subtrahend */
4407#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4408 do { \
4409 if (m_lo < s_lo) { /* underflow */ \
4410 d_hi = m_hi - s_hi; \
4411 if (d_hi > 0) { /* we can 'loan' 1 */ \
4412 d_hi--; \
4413 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4414 } else { /* m_hi <= s_hi */ \
4415 d_hi = 0; \
4416 d_lo = 0; \
4417 } \
4418 } else { /* m_lo >= s_lo */ \
4419 if (m_hi < s_hi) { \
4420 d_hi = 0; \
4421 d_lo = 0; \
4422 } else { /* m_hi >= s_hi */ \
4423 d_hi = m_hi - s_hi; \
4424 d_lo = m_lo - s_lo; \
4425 } \
4426 } \
4427 } while (0)
4428
4429/* minuend -= subtrahend */
4430#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4431 do { \
4432 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4433 } while (0)
4434
4435#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4436 do { \
4437 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4438 diff.lo, new->s_lo, old->s_lo); \
4439 old->s_hi = new->s_hi; \
4440 old->s_lo = new->s_lo; \
4441 ADD_64(estats->t_hi, diff.hi, \
4442 estats->t_lo, diff.lo); \
4443 } while (0)
4444
4445/* sum[hi:lo] += add */
4446#define ADD_EXTEND_64(s_hi, s_lo, a) \
4447 do { \
4448 s_lo += a; \
4449 s_hi += (s_lo < a) ? 1 : 0; \
4450 } while (0)
4451
4452#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4453 do { \
4454 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4455 } while (0)
4456
4457#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4458 do { \
4459 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4460 old_tclient->s = le32_to_cpu(tclient->s); \
4461 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4462 } while (0)
4463
4464/*
4465 * General service functions
4466 */
4467
4468static inline long bnx2x_hilo(u32 *hiref)
4469{
4470 u32 lo = *(hiref + 1);
4471#if (BITS_PER_LONG == 64)
4472 u32 hi = *hiref;
4473
4474 return HILO_U64(hi, lo);
4475#else
4476 return lo;
4477#endif
4478}
4479
4480/*
4481 * Init service functions
4482 */
4483
4484static void bnx2x_init_mac_stats(struct bnx2x *bp)
4485{
4486 struct dmae_command *dmae;
4487 int port = bp->port;
4488 int loader_idx = port * 8;
4489 u32 opcode;
4490 u32 mac_addr;
4491
4492 bp->executer_idx = 0;
4493 if (bp->fw_mb) {
4494 /* MCP */
4495 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4496 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4497#ifdef __BIG_ENDIAN
4498 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4499#else
4500 DMAE_CMD_ENDIANITY_DW_SWAP |
4501#endif
4502 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4503
4504 if (bp->link_up)
4505 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4506
4507 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4508 dmae->opcode = opcode;
4509 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4510 sizeof(u32));
4511 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4512 sizeof(u32));
4513 dmae->dst_addr_lo = bp->fw_mb >> 2;
4514 dmae->dst_addr_hi = 0;
4515 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4516 sizeof(u32)) >> 2;
4517 if (bp->link_up) {
4518 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4519 dmae->comp_addr_hi = 0;
4520 dmae->comp_val = 1;
4521 } else {
4522 dmae->comp_addr_lo = 0;
4523 dmae->comp_addr_hi = 0;
4524 dmae->comp_val = 0;
4525 }
4526 }
4527
4528 if (!bp->link_up) {
4529 /* no need to collect statistics in link down */
4530 return;
4531 }
4532
4533 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4534 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4535 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4536#ifdef __BIG_ENDIAN
4537 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4538#else
4539 DMAE_CMD_ENDIANITY_DW_SWAP |
4540#endif
4541 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4542
4543 if (bp->phy_flags & PHY_BMAC_FLAG) {
4544
4545 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4546 NIG_REG_INGRESS_BMAC0_MEM);
4547
4548 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4549 BIGMAC_REGISTER_TX_STAT_GTBYT */
4550 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4551 dmae->opcode = opcode;
4552 dmae->src_addr_lo = (mac_addr +
4553 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4554 dmae->src_addr_hi = 0;
4555 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4556 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4557 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4558 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4559 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4560 dmae->comp_addr_hi = 0;
4561 dmae->comp_val = 1;
4562
4563 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4564 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4565 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4566 dmae->opcode = opcode;
4567 dmae->src_addr_lo = (mac_addr +
4568 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4569 dmae->src_addr_hi = 0;
4570 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4571 offsetof(struct bmac_stats, rx_gr64));
4572 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4573 offsetof(struct bmac_stats, rx_gr64));
4574 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4575 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4576 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4577 dmae->comp_addr_hi = 0;
4578 dmae->comp_val = 1;
4579
4580 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4581
4582 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4583
4584 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4586 dmae->opcode = opcode;
4587 dmae->src_addr_lo = (mac_addr +
4588 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4589 dmae->src_addr_hi = 0;
4590 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4591 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4592 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4593 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4594 dmae->comp_addr_hi = 0;
4595 dmae->comp_val = 1;
4596
4597 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4598 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4599 dmae->opcode = opcode;
4600 dmae->src_addr_lo = (mac_addr +
4601 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4602 dmae->src_addr_hi = 0;
4603 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4604 offsetof(struct emac_stats,
4605 rx_falsecarriererrors));
4606 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4607 offsetof(struct emac_stats,
4608 rx_falsecarriererrors));
4609 dmae->len = 1;
4610 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4611 dmae->comp_addr_hi = 0;
4612 dmae->comp_val = 1;
4613
4614 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4615 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4616 dmae->opcode = opcode;
4617 dmae->src_addr_lo = (mac_addr +
4618 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4619 dmae->src_addr_hi = 0;
4620 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4621 offsetof(struct emac_stats,
4622 tx_ifhcoutoctets));
4623 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4624 offsetof(struct emac_stats,
4625 tx_ifhcoutoctets));
4626 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4628 dmae->comp_addr_hi = 0;
4629 dmae->comp_val = 1;
4630 }
4631
4632 /* NIG */
4633 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4634 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4635 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4636 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4637#ifdef __BIG_ENDIAN
4638 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4639#else
4640 DMAE_CMD_ENDIANITY_DW_SWAP |
4641#endif
4642 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4643 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4644 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4645 dmae->src_addr_hi = 0;
4646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4648 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4649 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4650 offsetof(struct nig_stats, done));
4651 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4652 offsetof(struct nig_stats, done));
4653 dmae->comp_val = 0xffffffff;
4654}
4655
4656static void bnx2x_init_stats(struct bnx2x *bp)
4657{
4658 int port = bp->port;
4659
4660 bp->stats_state = STATS_STATE_DISABLE;
4661 bp->executer_idx = 0;
4662
4663 bp->old_brb_discard = REG_RD(bp,
4664 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4665
4666 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4667 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4668 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4669
4670 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4671 REG_WR(bp, BAR_XSTRORM_INTMEM +
4672 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4673
4674 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4675 REG_WR(bp, BAR_TSTRORM_INTMEM +
4676 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4677
4678 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4679 REG_WR(bp, BAR_CSTRORM_INTMEM +
4680 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4681
4682 REG_WR(bp, BAR_XSTRORM_INTMEM +
4683 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4684 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4685 REG_WR(bp, BAR_XSTRORM_INTMEM +
4686 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4687 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4688
4689 REG_WR(bp, BAR_TSTRORM_INTMEM +
4690 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4691 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4692 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4694 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4695}
4696
4697static void bnx2x_stop_stats(struct bnx2x *bp)
4698{
4699 might_sleep();
4700 if (bp->stats_state != STATS_STATE_DISABLE) {
4701 int timeout = 10;
4702
4703 bp->stats_state = STATS_STATE_STOP;
4704 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4705
4706 while (bp->stats_state != STATS_STATE_DISABLE) {
4707 if (!timeout) {
c14423fe 4708 BNX2X_ERR("timeout waiting for stats stop\n");
a2fbb9ea
ET
4709 break;
4710 }
4711 timeout--;
4712 msleep(100);
4713 }
4714 }
4715 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4716}
4717
4718/*
4719 * Statistics service functions
4720 */
4721
4722static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4723{
4724 struct regp diff;
4725 struct regp sum;
4726 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4727 struct bmac_stats *old = &bp->old_bmac;
4728 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4729
4730 sum.hi = 0;
4731 sum.lo = 0;
4732
4733 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4734 tx_gtbyt.lo, total_bytes_transmitted_lo);
4735
4736 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4737 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4738 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4739
4740 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4741 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4742 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4743
4744 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4745 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4746 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4747 estats->total_unicast_packets_transmitted_lo, sum.lo);
4748
4749 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4750 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4751 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4752 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4753 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4754 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4755 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4756 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4757 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4758 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4759 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4760
4761 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4762 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4763 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4764 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4765 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4766 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4767 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4768 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4769
4770 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4771 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4772 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4773 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4774 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4775 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4776 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4777}
4778
4779static void bnx2x_update_emac_stats(struct bnx2x *bp)
4780{
4781 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4782 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4783
4784 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4785 total_bytes_transmitted_lo);
4786 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4787 total_unicast_packets_transmitted_hi,
4788 total_unicast_packets_transmitted_lo);
4789 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4790 total_multicast_packets_transmitted_hi,
4791 total_multicast_packets_transmitted_lo);
4792 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4793 total_broadcast_packets_transmitted_hi,
4794 total_broadcast_packets_transmitted_lo);
4795
4796 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4797 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4798 estats->single_collision_transmit_frames +=
4799 new->tx_dot3statssinglecollisionframes;
4800 estats->multiple_collision_transmit_frames +=
4801 new->tx_dot3statsmultiplecollisionframes;
4802 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4803 estats->excessive_collision_frames +=
4804 new->tx_dot3statsexcessivecollisions;
4805 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4806 estats->frames_transmitted_65_127_bytes +=
4807 new->tx_etherstatspkts65octetsto127octets;
4808 estats->frames_transmitted_128_255_bytes +=
4809 new->tx_etherstatspkts128octetsto255octets;
4810 estats->frames_transmitted_256_511_bytes +=
4811 new->tx_etherstatspkts256octetsto511octets;
4812 estats->frames_transmitted_512_1023_bytes +=
4813 new->tx_etherstatspkts512octetsto1023octets;
4814 estats->frames_transmitted_1024_1522_bytes +=
4815 new->tx_etherstatspkts1024octetsto1522octet;
4816 estats->frames_transmitted_1523_9022_bytes +=
4817 new->tx_etherstatspktsover1522octets;
4818
4819 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4820 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4821 estats->false_carrier_detections += new->rx_falsecarriererrors;
4822 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4823 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4824 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4825 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4826 estats->control_frames_received += new->rx_maccontrolframesreceived;
4827 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4828 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4829
4830 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4831 stat_IfHCInBadOctets_lo);
4832 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4833 stat_IfHCOutBadOctets_lo);
4834 estats->stat_Dot3statsInternalMacTransmitErrors +=
4835 new->tx_dot3statsinternalmactransmiterrors;
4836 estats->stat_Dot3StatsCarrierSenseErrors +=
4837 new->rx_dot3statscarriersenseerrors;
4838 estats->stat_Dot3StatsDeferredTransmissions +=
4839 new->tx_dot3statsdeferredtransmissions;
4840 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4841 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4842}
4843
4844static int bnx2x_update_storm_stats(struct bnx2x *bp)
4845{
4846 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4847 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4848 struct tstorm_per_client_stats *tclient =
4849 &tstats->client_statistics[0];
4850 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4851 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4852 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4853 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4854 u32 diff;
4855
4856 /* are DMAE stats valid? */
4857 if (nstats->done != 0xffffffff) {
4858 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4859 return -1;
4860 }
4861
4862 /* are storm stats valid? */
4863 if (tstats->done.hi != 0xffffffff) {
4864 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4865 return -2;
4866 }
4867 if (xstats->done.hi != 0xffffffff) {
4868 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4869 return -3;
4870 }
4871
4872 estats->total_bytes_received_hi =
4873 estats->valid_bytes_received_hi =
4874 le32_to_cpu(tclient->total_rcv_bytes.hi);
4875 estats->total_bytes_received_lo =
4876 estats->valid_bytes_received_lo =
4877 le32_to_cpu(tclient->total_rcv_bytes.lo);
4878 ADD_64(estats->total_bytes_received_hi,
4879 le32_to_cpu(tclient->rcv_error_bytes.hi),
4880 estats->total_bytes_received_lo,
4881 le32_to_cpu(tclient->rcv_error_bytes.lo));
4882
4883 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4884 total_unicast_packets_received_hi,
4885 total_unicast_packets_received_lo);
4886 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4887 total_multicast_packets_received_hi,
4888 total_multicast_packets_received_lo);
4889 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4890 total_broadcast_packets_received_hi,
4891 total_broadcast_packets_received_lo);
4892
4893 estats->frames_received_64_bytes = MAC_STX_NA;
4894 estats->frames_received_65_127_bytes = MAC_STX_NA;
4895 estats->frames_received_128_255_bytes = MAC_STX_NA;
4896 estats->frames_received_256_511_bytes = MAC_STX_NA;
4897 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4898 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4899 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4900
4901 estats->x_total_sent_bytes_hi =
4902 le32_to_cpu(xstats->total_sent_bytes.hi);
4903 estats->x_total_sent_bytes_lo =
4904 le32_to_cpu(xstats->total_sent_bytes.lo);
4905 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4906
4907 estats->t_rcv_unicast_bytes_hi =
4908 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4909 estats->t_rcv_unicast_bytes_lo =
4910 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4911 estats->t_rcv_broadcast_bytes_hi =
4912 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4913 estats->t_rcv_broadcast_bytes_lo =
4914 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4915 estats->t_rcv_multicast_bytes_hi =
4916 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4917 estats->t_rcv_multicast_bytes_lo =
4918 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4919 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4920
4921 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4922 estats->packets_too_big_discard =
4923 le32_to_cpu(tclient->packets_too_big_discard);
4924 estats->jabber_packets_received = estats->packets_too_big_discard +
4925 estats->stat_Dot3statsFramesTooLong;
4926 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4927 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4928 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4929 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4930 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4931 estats->brb_truncate_discard =
4932 le32_to_cpu(tstats->brb_truncate_discard);
4933
4934 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4935 bp->old_brb_discard = nstats->brb_discard;
4936
4937 estats->brb_packet = nstats->brb_packet;
4938 estats->brb_truncate = nstats->brb_truncate;
4939 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4940 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4941 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4942 estats->mng_discard = nstats->mng_discard;
4943 estats->mng_octet_inp = nstats->mng_octet_inp;
4944 estats->mng_octet_out = nstats->mng_octet_out;
4945 estats->mng_packet_inp = nstats->mng_packet_inp;
4946 estats->mng_packet_out = nstats->mng_packet_out;
4947 estats->pbf_octets = nstats->pbf_octets;
4948 estats->pbf_packet = nstats->pbf_packet;
4949 estats->safc_inp = nstats->safc_inp;
4950
4951 xstats->done.hi = 0;
4952 tstats->done.hi = 0;
4953 nstats->done = 0;
4954
4955 return 0;
4956}
4957
4958static void bnx2x_update_net_stats(struct bnx2x *bp)
4959{
4960 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4961 struct net_device_stats *nstats = &bp->dev->stats;
4962
4963 nstats->rx_packets =
4964 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4965 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4966 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4967
4968 nstats->tx_packets =
4969 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4970 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4971 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4972
4973 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4974
0e39e645 4975 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4976
0e39e645 4977 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
a2fbb9ea
ET
4978 nstats->tx_dropped = 0;
4979
4980 nstats->multicast =
4981 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4982
0e39e645
ET
4983 nstats->collisions = estats->single_collision_transmit_frames +
4984 estats->multiple_collision_transmit_frames +
4985 estats->late_collision_frames +
4986 estats->excessive_collision_frames;
a2fbb9ea
ET
4987
4988 nstats->rx_length_errors = estats->runt_packets_received +
4989 estats->jabber_packets_received;
0e39e645
ET
4990 nstats->rx_over_errors = estats->brb_discard +
4991 estats->brb_truncate_discard;
a2fbb9ea
ET
4992 nstats->rx_crc_errors = estats->crc_receive_errors;
4993 nstats->rx_frame_errors = estats->alignment_errors;
0e39e645 4994 nstats->rx_fifo_errors = estats->no_buff_discard;
a2fbb9ea
ET
4995 nstats->rx_missed_errors = estats->xxoverflow_discard;
4996
4997 nstats->rx_errors = nstats->rx_length_errors +
4998 nstats->rx_over_errors +
4999 nstats->rx_crc_errors +
5000 nstats->rx_frame_errors +
0e39e645
ET
5001 nstats->rx_fifo_errors +
5002 nstats->rx_missed_errors;
a2fbb9ea
ET
5003
5004 nstats->tx_aborted_errors = estats->late_collision_frames +
0e39e645 5005 estats->excessive_collision_frames;
a2fbb9ea
ET
5006 nstats->tx_carrier_errors = estats->false_carrier_detections;
5007 nstats->tx_fifo_errors = 0;
5008 nstats->tx_heartbeat_errors = 0;
5009 nstats->tx_window_errors = 0;
5010
5011 nstats->tx_errors = nstats->tx_aborted_errors +
5012 nstats->tx_carrier_errors;
5013
5014 estats->mac_stx_start = ++estats->mac_stx_end;
5015}
5016
5017static void bnx2x_update_stats(struct bnx2x *bp)
5018{
5019 int i;
5020
5021 if (!bnx2x_update_storm_stats(bp)) {
5022
5023 if (bp->phy_flags & PHY_BMAC_FLAG) {
5024 bnx2x_update_bmac_stats(bp);
5025
5026 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5027 bnx2x_update_emac_stats(bp);
5028
5029 } else { /* unreached */
5030 BNX2X_ERR("no MAC active\n");
5031 return;
5032 }
5033
5034 bnx2x_update_net_stats(bp);
5035 }
5036
5037 if (bp->msglevel & NETIF_MSG_TIMER) {
5038 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5039 struct net_device_stats *nstats = &bp->dev->stats;
5040
5041 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5042 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5043 " tx pkt (%lx)\n",
5044 bnx2x_tx_avail(bp->fp),
5045 *bp->fp->tx_cons_sb, nstats->tx_packets);
5046 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5047 " rx pkt (%lx)\n",
5048 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5049 *bp->fp->rx_cons_sb, nstats->rx_packets);
5050 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5051 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
5052 estats->driver_xoff, estats->brb_discard);
5053 printk(KERN_DEBUG "tstats: checksum_discard %u "
5054 "packets_too_big_discard %u no_buff_discard %u "
5055 "mac_discard %u mac_filter_discard %u "
5056 "xxovrflow_discard %u brb_truncate_discard %u "
5057 "ttl0_discard %u\n",
5058 estats->checksum_discard,
5059 estats->packets_too_big_discard,
5060 estats->no_buff_discard, estats->mac_discard,
5061 estats->mac_filter_discard, estats->xxoverflow_discard,
5062 estats->brb_truncate_discard, estats->ttl0_discard);
5063
5064 for_each_queue(bp, i) {
5065 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5066 bnx2x_fp(bp, i, tx_pkt),
5067 bnx2x_fp(bp, i, rx_pkt),
5068 bnx2x_fp(bp, i, rx_calls));
5069 }
5070 }
5071
5072 if (bp->state != BNX2X_STATE_OPEN) {
5073 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5074 return;
5075 }
5076
5077#ifdef BNX2X_STOP_ON_ERROR
5078 if (unlikely(bp->panic))
5079 return;
5080#endif
5081
5082 /* loader */
5083 if (bp->executer_idx) {
5084 struct dmae_command *dmae = &bp->dmae;
5085 int port = bp->port;
5086 int loader_idx = port * 8;
5087
5088 memset(dmae, 0, sizeof(struct dmae_command));
5089
5090 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5091 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5092 DMAE_CMD_DST_RESET |
5093#ifdef __BIG_ENDIAN
5094 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5095#else
5096 DMAE_CMD_ENDIANITY_DW_SWAP |
5097#endif
5098 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5099 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5100 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5101 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5102 sizeof(struct dmae_command) *
5103 (loader_idx + 1)) >> 2;
5104 dmae->dst_addr_hi = 0;
5105 dmae->len = sizeof(struct dmae_command) >> 2;
5106 dmae->len--; /* !!! for A0/1 only */
5107 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5108 dmae->comp_addr_hi = 0;
5109 dmae->comp_val = 1;
5110
5111 bnx2x_post_dmae(bp, dmae, loader_idx);
5112 }
5113
5114 if (bp->stats_state != STATS_STATE_ENABLE) {
5115 bp->stats_state = STATS_STATE_DISABLE;
5116 return;
5117 }
5118
5119 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5120 /* stats ramrod has it's own slot on the spe */
5121 bp->spq_left++;
5122 bp->stat_pending = 1;
5123 }
5124}
5125
5126static void bnx2x_timer(unsigned long data)
5127{
5128 struct bnx2x *bp = (struct bnx2x *) data;
5129
5130 if (!netif_running(bp->dev))
5131 return;
5132
5133 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5134 goto timer_restart;
a2fbb9ea
ET
5135
5136 if (poll) {
5137 struct bnx2x_fastpath *fp = &bp->fp[0];
5138 int rc;
5139
5140 bnx2x_tx_int(fp, 1000);
5141 rc = bnx2x_rx_int(fp, 1000);
5142 }
5143
f1410647 5144 if (!nomcp) {
a2fbb9ea
ET
5145 int port = bp->port;
5146 u32 drv_pulse;
5147 u32 mcp_pulse;
5148
5149 ++bp->fw_drv_pulse_wr_seq;
5150 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5151 /* TBD - add SYSTEM_TIME */
5152 drv_pulse = bp->fw_drv_pulse_wr_seq;
f1410647 5153 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
a2fbb9ea 5154
f1410647 5155 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
a2fbb9ea
ET
5156 MCP_PULSE_SEQ_MASK);
5157 /* The delta between driver pulse and mcp response
5158 * should be 1 (before mcp response) or 0 (after mcp response)
5159 */
5160 if ((drv_pulse != mcp_pulse) &&
5161 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5162 /* someone lost a heartbeat... */
5163 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5164 drv_pulse, mcp_pulse);
5165 }
5166 }
5167
5168 if (bp->stats_state == STATS_STATE_DISABLE)
f1410647 5169 goto timer_restart;
a2fbb9ea
ET
5170
5171 bnx2x_update_stats(bp);
5172
f1410647 5173timer_restart:
a2fbb9ea
ET
5174 mod_timer(&bp->timer, jiffies + bp->current_interval);
5175}
5176
5177/* end of Statistics */
5178
5179/* nic init */
5180
5181/*
5182 * nic init service functions
5183 */
5184
5185static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5186 dma_addr_t mapping, int id)
5187{
5188 int port = bp->port;
5189 u64 section;
5190 int index;
5191
5192 /* USTORM */
5193 section = ((u64)mapping) + offsetof(struct host_status_block,
5194 u_status_block);
5195 sb->u_status_block.status_block_id = id;
5196
5197 REG_WR(bp, BAR_USTRORM_INTMEM +
5198 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5199 REG_WR(bp, BAR_USTRORM_INTMEM +
5200 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5201 U64_HI(section));
5202
5203 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5204 REG_WR16(bp, BAR_USTRORM_INTMEM +
5205 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5206
5207 /* CSTORM */
5208 section = ((u64)mapping) + offsetof(struct host_status_block,
5209 c_status_block);
5210 sb->c_status_block.status_block_id = id;
5211
5212 REG_WR(bp, BAR_CSTRORM_INTMEM +
5213 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5214 REG_WR(bp, BAR_CSTRORM_INTMEM +
5215 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5216 U64_HI(section));
5217
5218 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5219 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5220 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5221
5222 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5223}
5224
5225static void bnx2x_init_def_sb(struct bnx2x *bp,
5226 struct host_def_status_block *def_sb,
5227 dma_addr_t mapping, int id)
5228{
5229 int port = bp->port;
5230 int index, val, reg_offset;
5231 u64 section;
5232
5233 /* ATTN */
5234 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5235 atten_status_block);
5236 def_sb->atten_status_block.status_block_id = id;
5237
49d66772
ET
5238 bp->def_att_idx = 0;
5239 bp->attn_state = 0;
5240
a2fbb9ea
ET
5241 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5242 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5243
5244 for (index = 0; index < 3; index++) {
5245 bp->attn_group[index].sig[0] = REG_RD(bp,
5246 reg_offset + 0x10*index);
5247 bp->attn_group[index].sig[1] = REG_RD(bp,
5248 reg_offset + 0x4 + 0x10*index);
5249 bp->attn_group[index].sig[2] = REG_RD(bp,
5250 reg_offset + 0x8 + 0x10*index);
5251 bp->attn_group[index].sig[3] = REG_RD(bp,
5252 reg_offset + 0xc + 0x10*index);
5253 }
5254
5255 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5256 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5257
5258 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5259 HC_REG_ATTN_MSG0_ADDR_L);
5260
5261 REG_WR(bp, reg_offset, U64_LO(section));
5262 REG_WR(bp, reg_offset + 4, U64_HI(section));
5263
5264 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5265
5266 val = REG_RD(bp, reg_offset);
5267 val |= id;
5268 REG_WR(bp, reg_offset, val);
5269
5270 /* USTORM */
5271 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5272 u_def_status_block);
5273 def_sb->u_def_status_block.status_block_id = id;
5274
49d66772
ET
5275 bp->def_u_idx = 0;
5276
a2fbb9ea
ET
5277 REG_WR(bp, BAR_USTRORM_INTMEM +
5278 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5279 REG_WR(bp, BAR_USTRORM_INTMEM +
5280 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5281 U64_HI(section));
5282 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5283 BNX2X_BTR);
5284
5285 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5286 REG_WR16(bp, BAR_USTRORM_INTMEM +
5287 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5288
5289 /* CSTORM */
5290 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5291 c_def_status_block);
5292 def_sb->c_def_status_block.status_block_id = id;
5293
49d66772
ET
5294 bp->def_c_idx = 0;
5295
a2fbb9ea
ET
5296 REG_WR(bp, BAR_CSTRORM_INTMEM +
5297 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5298 REG_WR(bp, BAR_CSTRORM_INTMEM +
5299 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5300 U64_HI(section));
5301 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5302 BNX2X_BTR);
5303
5304 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5305 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5306 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5307
5308 /* TSTORM */
5309 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5310 t_def_status_block);
5311 def_sb->t_def_status_block.status_block_id = id;
5312
49d66772
ET
5313 bp->def_t_idx = 0;
5314
a2fbb9ea
ET
5315 REG_WR(bp, BAR_TSTRORM_INTMEM +
5316 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5317 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5319 U64_HI(section));
5320 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5321 BNX2X_BTR);
5322
5323 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5324 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5325 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5326
5327 /* XSTORM */
5328 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5329 x_def_status_block);
5330 def_sb->x_def_status_block.status_block_id = id;
5331
49d66772
ET
5332 bp->def_x_idx = 0;
5333
a2fbb9ea
ET
5334 REG_WR(bp, BAR_XSTRORM_INTMEM +
5335 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5336 REG_WR(bp, BAR_XSTRORM_INTMEM +
5337 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5338 U64_HI(section));
5339 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5340 BNX2X_BTR);
5341
5342 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5343 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5344 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5345
49d66772
ET
5346 bp->stat_pending = 0;
5347
a2fbb9ea
ET
5348 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5349}
5350
5351static void bnx2x_update_coalesce(struct bnx2x *bp)
5352{
5353 int port = bp->port;
5354 int i;
5355
5356 for_each_queue(bp, i) {
5357
5358 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5359 REG_WR8(bp, BAR_USTRORM_INTMEM +
5360 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5361 HC_INDEX_U_ETH_RX_CQ_CONS),
5362 bp->rx_ticks_int/12);
5363 REG_WR16(bp, BAR_USTRORM_INTMEM +
5364 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5365 HC_INDEX_U_ETH_RX_CQ_CONS),
5366 bp->rx_ticks_int ? 0 : 1);
5367
5368 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5369 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5370 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5371 HC_INDEX_C_ETH_TX_CQ_CONS),
5372 bp->tx_ticks_int/12);
5373 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5374 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5375 HC_INDEX_C_ETH_TX_CQ_CONS),
5376 bp->tx_ticks_int ? 0 : 1);
5377 }
5378}
5379
5380static void bnx2x_init_rx_rings(struct bnx2x *bp)
5381{
5382 u16 ring_prod;
5383 int i, j;
5384 int port = bp->port;
5385
5386 bp->rx_buf_use_size = bp->dev->mtu;
5387
5388 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5389 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5390
5391 for_each_queue(bp, j) {
5392 struct bnx2x_fastpath *fp = &bp->fp[j];
5393
5394 fp->rx_bd_cons = 0;
5395 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5396
5397 for (i = 1; i <= NUM_RX_RINGS; i++) {
5398 struct eth_rx_bd *rx_bd;
5399
5400 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5401 rx_bd->addr_hi =
5402 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5403 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5404 rx_bd->addr_lo =
5405 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5406 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5407
5408 }
5409
5410 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5411 struct eth_rx_cqe_next_page *nextpg;
5412
5413 nextpg = (struct eth_rx_cqe_next_page *)
5414 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5415 nextpg->addr_hi =
5416 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5417 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5418 nextpg->addr_lo =
5419 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5420 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5421 }
5422
5423 /* rx completion queue */
5424 fp->rx_comp_cons = ring_prod = 0;
5425
5426 for (i = 0; i < bp->rx_ring_size; i++) {
5427 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5428 BNX2X_ERR("was only able to allocate "
5429 "%d rx skbs\n", i);
5430 break;
5431 }
5432 ring_prod = NEXT_RX_IDX(ring_prod);
5433 BUG_TRAP(ring_prod > i);
5434 }
5435
5436 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5437 fp->rx_pkt = fp->rx_calls = 0;
5438
c14423fe 5439 /* Warning! this will generate an interrupt (to the TSTORM) */
a2fbb9ea
ET
5440 /* must only be done when chip is initialized */
5441 REG_WR(bp, BAR_TSTRORM_INTMEM +
5442 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5443 if (j != 0)
5444 continue;
5445
5446 REG_WR(bp, BAR_USTRORM_INTMEM +
5447 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5448 U64_LO(fp->rx_comp_mapping));
5449 REG_WR(bp, BAR_USTRORM_INTMEM +
5450 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5451 U64_HI(fp->rx_comp_mapping));
5452 }
5453}
5454
5455static void bnx2x_init_tx_ring(struct bnx2x *bp)
5456{
5457 int i, j;
5458
5459 for_each_queue(bp, j) {
5460 struct bnx2x_fastpath *fp = &bp->fp[j];
5461
5462 for (i = 1; i <= NUM_TX_RINGS; i++) {
5463 struct eth_tx_bd *tx_bd =
5464 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5465
5466 tx_bd->addr_hi =
5467 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5468 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5469 tx_bd->addr_lo =
5470 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5471 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5472 }
5473
5474 fp->tx_pkt_prod = 0;
5475 fp->tx_pkt_cons = 0;
5476 fp->tx_bd_prod = 0;
5477 fp->tx_bd_cons = 0;
5478 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5479 fp->tx_pkt = 0;
5480 }
5481}
5482
5483static void bnx2x_init_sp_ring(struct bnx2x *bp)
5484{
5485 int port = bp->port;
5486
5487 spin_lock_init(&bp->spq_lock);
5488
5489 bp->spq_left = MAX_SPQ_PENDING;
5490 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5491 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5492 bp->spq_prod_bd = bp->spq;
5493 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5494
5495 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5496 U64_LO(bp->spq_mapping));
5497 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5498 U64_HI(bp->spq_mapping));
5499
5500 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5501 bp->spq_prod_idx);
5502}
5503
5504static void bnx2x_init_context(struct bnx2x *bp)
5505{
5506 int i;
5507
5508 for_each_queue(bp, i) {
5509 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5511
5512 context->xstorm_st_context.tx_bd_page_base_hi =
5513 U64_HI(fp->tx_desc_mapping);
5514 context->xstorm_st_context.tx_bd_page_base_lo =
5515 U64_LO(fp->tx_desc_mapping);
5516 context->xstorm_st_context.db_data_addr_hi =
5517 U64_HI(fp->tx_prods_mapping);
5518 context->xstorm_st_context.db_data_addr_lo =
5519 U64_LO(fp->tx_prods_mapping);
5520
5521 context->ustorm_st_context.rx_bd_page_base_hi =
5522 U64_HI(fp->rx_desc_mapping);
5523 context->ustorm_st_context.rx_bd_page_base_lo =
5524 U64_LO(fp->rx_desc_mapping);
5525 context->ustorm_st_context.status_block_id = i;
5526 context->ustorm_st_context.sb_index_number =
5527 HC_INDEX_U_ETH_RX_CQ_CONS;
5528 context->ustorm_st_context.rcq_base_address_hi =
5529 U64_HI(fp->rx_comp_mapping);
5530 context->ustorm_st_context.rcq_base_address_lo =
5531 U64_LO(fp->rx_comp_mapping);
5532 context->ustorm_st_context.flags =
5533 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5534 context->ustorm_st_context.mc_alignment_size = 64;
5535 context->ustorm_st_context.num_rss = bp->num_queues;
5536
5537 context->cstorm_st_context.sb_index_number =
5538 HC_INDEX_C_ETH_TX_CQ_CONS;
5539 context->cstorm_st_context.status_block_id = i;
5540
5541 context->xstorm_ag_context.cdu_reserved =
5542 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5543 CDU_REGION_NUMBER_XCM_AG,
5544 ETH_CONNECTION_TYPE);
5545 context->ustorm_ag_context.cdu_usage =
5546 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5547 CDU_REGION_NUMBER_UCM_AG,
5548 ETH_CONNECTION_TYPE);
5549 }
5550}
5551
5552static void bnx2x_init_ind_table(struct bnx2x *bp)
5553{
5554 int port = bp->port;
5555 int i;
5556
5557 if (!is_multi(bp))
5558 return;
5559
5560 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5561 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5562 i % bp->num_queues);
5563
5564 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5565}
5566
49d66772
ET
5567static void bnx2x_set_client_config(struct bnx2x *bp)
5568{
5569#ifdef BCM_VLAN
5570 int mode = bp->rx_mode;
5571#endif
5572 int i, port = bp->port;
5573 struct tstorm_eth_client_config tstorm_client = {0};
5574
5575 tstorm_client.mtu = bp->dev->mtu;
5576 tstorm_client.statistics_counter_id = 0;
5577 tstorm_client.config_flags =
5578 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5579#ifdef BCM_VLAN
5580 if (mode && bp->vlgrp) {
5581 tstorm_client.config_flags |=
5582 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5583 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5584 }
5585#endif
5586 if (mode != BNX2X_RX_MODE_PROMISC)
5587 tstorm_client.drop_flags =
5588 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5589
5590 for_each_queue(bp, i) {
5591 REG_WR(bp, BAR_TSTRORM_INTMEM +
5592 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5593 ((u32 *)&tstorm_client)[0]);
5594 REG_WR(bp, BAR_TSTRORM_INTMEM +
5595 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5596 ((u32 *)&tstorm_client)[1]);
5597 }
5598
5599/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5600 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5601}
5602
a2fbb9ea
ET
5603static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5604{
5605 int mode = bp->rx_mode;
5606 int port = bp->port;
5607 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5608 int i;
5609
5610 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5611
5612 switch (mode) {
5613 case BNX2X_RX_MODE_NONE: /* no Rx */
5614 tstorm_mac_filter.ucast_drop_all = 1;
5615 tstorm_mac_filter.mcast_drop_all = 1;
5616 tstorm_mac_filter.bcast_drop_all = 1;
5617 break;
5618 case BNX2X_RX_MODE_NORMAL:
5619 tstorm_mac_filter.bcast_accept_all = 1;
5620 break;
5621 case BNX2X_RX_MODE_ALLMULTI:
5622 tstorm_mac_filter.mcast_accept_all = 1;
5623 tstorm_mac_filter.bcast_accept_all = 1;
5624 break;
5625 case BNX2X_RX_MODE_PROMISC:
5626 tstorm_mac_filter.ucast_accept_all = 1;
5627 tstorm_mac_filter.mcast_accept_all = 1;
5628 tstorm_mac_filter.bcast_accept_all = 1;
5629 break;
5630 default:
5631 BNX2X_ERR("bad rx mode (%d)\n", mode);
5632 }
5633
5634 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5635 REG_WR(bp, BAR_TSTRORM_INTMEM +
5636 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5637 ((u32 *)&tstorm_mac_filter)[i]);
5638
5639/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5640 ((u32 *)&tstorm_mac_filter)[i]); */
5641 }
a2fbb9ea 5642
49d66772
ET
5643 if (mode != BNX2X_RX_MODE_NONE)
5644 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5645}
5646
5647static void bnx2x_init_internal(struct bnx2x *bp)
5648{
5649 int port = bp->port;
5650 struct tstorm_eth_function_common_config tstorm_config = {0};
5651 struct stats_indication_flags stats_flags = {0};
a2fbb9ea
ET
5652
5653 if (is_multi(bp)) {
5654 tstorm_config.config_flags = MULTI_FLAGS;
5655 tstorm_config.rss_result_mask = MULTI_MASK;
5656 }
5657
5658 REG_WR(bp, BAR_TSTRORM_INTMEM +
5659 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5660 (*(u32 *)&tstorm_config));
5661
5662/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5663 (*(u32 *)&tstorm_config)); */
5664
c14423fe 5665 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5666 bnx2x_set_storm_rx_mode(bp);
5667
a2fbb9ea
ET
5668 stats_flags.collect_eth = cpu_to_le32(1);
5669
5670 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5671 ((u32 *)&stats_flags)[0]);
5672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5673 ((u32 *)&stats_flags)[1]);
5674
5675 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5676 ((u32 *)&stats_flags)[0]);
5677 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5678 ((u32 *)&stats_flags)[1]);
5679
5680 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5681 ((u32 *)&stats_flags)[0]);
5682 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5683 ((u32 *)&stats_flags)[1]);
5684
5685/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5686 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5687}
5688
5689static void bnx2x_nic_init(struct bnx2x *bp)
5690{
5691 int i;
5692
5693 for_each_queue(bp, i) {
5694 struct bnx2x_fastpath *fp = &bp->fp[i];
5695
5696 fp->state = BNX2X_FP_STATE_CLOSED;
5697 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5698 bp, fp->status_blk, i);
5699 fp->index = i;
5700 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5701 }
5702
5703 bnx2x_init_def_sb(bp, bp->def_status_blk,
5704 bp->def_status_blk_mapping, 0x10);
5705 bnx2x_update_coalesce(bp);
5706 bnx2x_init_rx_rings(bp);
5707 bnx2x_init_tx_ring(bp);
5708 bnx2x_init_sp_ring(bp);
5709 bnx2x_init_context(bp);
5710 bnx2x_init_internal(bp);
5711 bnx2x_init_stats(bp);
5712 bnx2x_init_ind_table(bp);
5713 bnx2x_enable_int(bp);
5714
5715}
5716
5717/* end of nic init */
5718
5719/*
5720 * gzip service functions
5721 */
5722
5723static int bnx2x_gunzip_init(struct bnx2x *bp)
5724{
5725 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5726 &bp->gunzip_mapping);
5727 if (bp->gunzip_buf == NULL)
5728 goto gunzip_nomem1;
5729
5730 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5731 if (bp->strm == NULL)
5732 goto gunzip_nomem2;
5733
5734 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5735 GFP_KERNEL);
5736 if (bp->strm->workspace == NULL)
5737 goto gunzip_nomem3;
5738
5739 return 0;
5740
5741gunzip_nomem3:
5742 kfree(bp->strm);
5743 bp->strm = NULL;
5744
5745gunzip_nomem2:
5746 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5747 bp->gunzip_mapping);
5748 bp->gunzip_buf = NULL;
5749
5750gunzip_nomem1:
5751 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5752 " uncompression\n", bp->dev->name);
5753 return -ENOMEM;
5754}
5755
5756static void bnx2x_gunzip_end(struct bnx2x *bp)
5757{
5758 kfree(bp->strm->workspace);
5759
5760 kfree(bp->strm);
5761 bp->strm = NULL;
5762
5763 if (bp->gunzip_buf) {
5764 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5765 bp->gunzip_mapping);
5766 bp->gunzip_buf = NULL;
5767 }
5768}
5769
5770static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5771{
5772 int n, rc;
5773
5774 /* check gzip header */
5775 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5776 return -EINVAL;
5777
5778 n = 10;
5779
5780#define FNAME 0x8
5781
5782 if (zbuf[3] & FNAME)
5783 while ((zbuf[n++] != 0) && (n < len));
5784
5785 bp->strm->next_in = zbuf + n;
5786 bp->strm->avail_in = len - n;
5787 bp->strm->next_out = bp->gunzip_buf;
5788 bp->strm->avail_out = FW_BUF_SIZE;
5789
5790 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5791 if (rc != Z_OK)
5792 return rc;
5793
5794 rc = zlib_inflate(bp->strm, Z_FINISH);
5795 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5796 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5797 bp->dev->name, bp->strm->msg);
5798
5799 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5800 if (bp->gunzip_outlen & 0x3)
5801 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5802 " gunzip_outlen (%d) not aligned\n",
5803 bp->dev->name, bp->gunzip_outlen);
5804 bp->gunzip_outlen >>= 2;
5805
5806 zlib_inflateEnd(bp->strm);
5807
5808 if (rc == Z_STREAM_END)
5809 return 0;
5810
5811 return rc;
5812}
5813
5814/* nic load/unload */
5815
5816/*
5817 * general service functions
5818 */
5819
5820/* send a NIG loopback debug packet */
5821static void bnx2x_lb_pckt(struct bnx2x *bp)
5822{
5823#ifdef USE_DMAE
5824 u32 wb_write[3];
5825#endif
5826
5827 /* Ethernet source and destination addresses */
5828#ifdef USE_DMAE
5829 wb_write[0] = 0x55555555;
5830 wb_write[1] = 0x55555555;
5831 wb_write[2] = 0x20; /* SOP */
5832 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5833#else
5834 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5835 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5836 /* SOP */
5837 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5838#endif
5839
5840 /* NON-IP protocol */
5841#ifdef USE_DMAE
5842 wb_write[0] = 0x09000000;
5843 wb_write[1] = 0x55555555;
5844 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5845 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5846#else
5847 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5848 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5849 /* EOP, eop_bvalid = 0 */
5850 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5851#endif
5852}
5853
5854/* some of the internal memories
5855 * are not directly readable from the driver
5856 * to test them we send debug packets
5857 */
5858static int bnx2x_int_mem_test(struct bnx2x *bp)
5859{
5860 int factor;
5861 int count, i;
5862 u32 val = 0;
5863
5864 switch (CHIP_REV(bp)) {
5865 case CHIP_REV_EMUL:
5866 factor = 200;
5867 break;
5868 case CHIP_REV_FPGA:
5869 factor = 120;
5870 break;
5871 default:
5872 factor = 1;
5873 break;
5874 }
5875
5876 DP(NETIF_MSG_HW, "start part1\n");
5877
5878 /* Disable inputs of parser neighbor blocks */
5879 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5880 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5881 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5882 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5883
5884 /* Write 0 to parser credits for CFC search request */
5885 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5886
5887 /* send Ethernet packet */
5888 bnx2x_lb_pckt(bp);
5889
5890 /* TODO do i reset NIG statistic? */
5891 /* Wait until NIG register shows 1 packet of size 0x10 */
5892 count = 1000 * factor;
5893 while (count) {
5894#ifdef BNX2X_DMAE_RD
5895 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5896 val = *bnx2x_sp(bp, wb_data[0]);
5897#else
5898 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5899 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5900#endif
5901 if (val == 0x10)
5902 break;
5903
5904 msleep(10);
5905 count--;
5906 }
5907 if (val != 0x10) {
5908 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5909 return -1;
5910 }
5911
5912 /* Wait until PRS register shows 1 packet */
5913 count = 1000 * factor;
5914 while (count) {
5915 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5916
5917 if (val == 1)
5918 break;
5919
5920 msleep(10);
5921 count--;
5922 }
5923 if (val != 0x1) {
5924 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5925 return -2;
5926 }
5927
5928 /* Reset and init BRB, PRS */
5929 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5930 msleep(50);
5931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5932 msleep(50);
5933 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5934 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5935
5936 DP(NETIF_MSG_HW, "part2\n");
5937
5938 /* Disable inputs of parser neighbor blocks */
5939 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5940 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5941 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5942 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5943
5944 /* Write 0 to parser credits for CFC search request */
5945 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5946
5947 /* send 10 Ethernet packets */
5948 for (i = 0; i < 10; i++)
5949 bnx2x_lb_pckt(bp);
5950
5951 /* Wait until NIG register shows 10 + 1
5952 packets of size 11*0x10 = 0xb0 */
5953 count = 1000 * factor;
5954 while (count) {
5955#ifdef BNX2X_DMAE_RD
5956 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5957 val = *bnx2x_sp(bp, wb_data[0]);
5958#else
5959 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5960 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5961#endif
5962 if (val == 0xb0)
5963 break;
5964
5965 msleep(10);
5966 count--;
5967 }
5968 if (val != 0xb0) {
5969 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5970 return -3;
5971 }
5972
5973 /* Wait until PRS register shows 2 packets */
5974 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5975 if (val != 2)
5976 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5977
5978 /* Write 1 to parser credits for CFC search request */
5979 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5980
5981 /* Wait until PRS register shows 3 packets */
5982 msleep(10 * factor);
5983 /* Wait until NIG register shows 1 packet of size 0x10 */
5984 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5985 if (val != 3)
5986 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5987
5988 /* clear NIG EOP FIFO */
5989 for (i = 0; i < 11; i++)
5990 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5991 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5992 if (val != 1) {
5993 BNX2X_ERR("clear of NIG failed\n");
5994 return -4;
5995 }
5996
5997 /* Reset and init BRB, PRS, NIG */
5998 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5999 msleep(50);
6000 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6001 msleep(50);
6002 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6003 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6004#ifndef BCM_ISCSI
6005 /* set NIC mode */
6006 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6007#endif
6008
6009 /* Enable inputs of parser neighbor blocks */
6010 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6011 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6012 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6013 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6014
6015 DP(NETIF_MSG_HW, "done\n");
6016
6017 return 0; /* OK */
6018}
6019
6020static void enable_blocks_attention(struct bnx2x *bp)
6021{
6022 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6023 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6024 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6025 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6026 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6027 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6028 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6029 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6030 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6031/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6032/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6033 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6034 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6035 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6036/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6037/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6038 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6039 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6040 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6041 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6042/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6043/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6044 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6045 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6046 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6047 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6048/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6049/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6050 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6051 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6052/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6053 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6054}
6055
6056static int bnx2x_function_init(struct bnx2x *bp, int mode)
6057{
6058 int func = bp->port;
6059 int port = func ? PORT1 : PORT0;
6060 u32 val, i;
6061#ifdef USE_DMAE
6062 u32 wb_write[2];
6063#endif
6064
6065 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6066 if ((func != 0) && (func != 1)) {
6067 BNX2X_ERR("BAD function number (%d)\n", func);
6068 return -ENODEV;
6069 }
6070
6071 bnx2x_gunzip_init(bp);
6072
6073 if (mode & 0x1) { /* init common */
6074 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6075 func, mode);
f1410647
ET
6076 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6077 0xffffffff);
6078 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6079 0xfffc);
a2fbb9ea
ET
6080 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6081
6082 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6083 msleep(30);
6084 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6085
6086 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6087 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6088
6089 bnx2x_init_pxp(bp);
6090
6091 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6092 /* enable HW interrupt from PXP on USDM
6093 overflow bit 16 on INT_MASK_0 */
6094 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6095 }
6096
6097#ifdef __BIG_ENDIAN
6098 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6099 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6100 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6101 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6103 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6104
6105/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6106 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6107 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6108 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6109 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6110#endif
6111
6112#ifndef BCM_ISCSI
6113 /* set NIC mode */
6114 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6115#endif
6116
6117 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6118#ifdef BCM_ISCSI
6119 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6120 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6121 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6122#endif
6123
6124 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6125
6126 /* let the HW do it's magic ... */
6127 msleep(100);
6128 /* finish PXP init
6129 (can be moved up if we want to use the DMAE) */
6130 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6131 if (val != 1) {
6132 BNX2X_ERR("PXP2 CFG failed\n");
6133 return -EBUSY;
6134 }
6135
6136 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6137 if (val != 1) {
6138 BNX2X_ERR("PXP2 RD_INIT failed\n");
6139 return -EBUSY;
6140 }
6141
6142 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6143 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6144
6145 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6146
6147 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6148 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6149 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6150 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6151
6152#ifdef BNX2X_DMAE_RD
6153 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6154 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6155 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6157#else
6158 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6159 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6160 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6161 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6162 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6163 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6164 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6165 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6166 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6167 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6168 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6169 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6170#endif
6171 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
c14423fe 6172 /* soft reset pulse */
a2fbb9ea
ET
6173 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6174 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6175
6176#ifdef BCM_ISCSI
6177 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6178#endif
6179 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6180 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6181 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6182 /* enable hw interrupt from doorbell Q */
6183 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6184 }
6185
6186 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6187
6188 if (CHIP_REV_IS_SLOW(bp)) {
6189 /* fix for emulation and FPGA for no pause */
6190 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6191 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6192 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6193 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6194 }
6195
6196 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6197
6198 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6199 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6200 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6201 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6202
6203 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6204 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6205 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6206 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6207
6208 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6209 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6210 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6211 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6212
6213 /* sync semi rtc */
6214 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6215 0x80000000);
6216 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6217 0x80000000);
6218
6219 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6220 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6221 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6222
6223 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6224 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6225 REG_WR(bp, i, 0xc0cac01a);
c14423fe 6226 /* TODO: replace with something meaningful */
a2fbb9ea
ET
6227 }
6228 /* SRCH COMMON comes here */
6229 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6230
6231 if (sizeof(union cdu_context) != 1024) {
6232 /* we currently assume that a context is 1024 bytes */
6233 printk(KERN_ALERT PFX "please adjust the size of"
6234 " cdu_context(%ld)\n",
6235 (long)sizeof(union cdu_context));
6236 }
6237 val = (4 << 24) + (0 << 12) + 1024;
6238 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6239 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6240
6241 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6242 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6243
6244 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6245 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6246 MISC_AEU_COMMON_END);
6247 /* RXPCS COMMON comes here */
6248 /* EMAC0 COMMON comes here */
6249 /* EMAC1 COMMON comes here */
6250 /* DBU COMMON comes here */
6251 /* DBG COMMON comes here */
6252 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6253
6254 if (CHIP_REV_IS_SLOW(bp))
6255 msleep(200);
6256
6257 /* finish CFC init */
6258 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6259 if (val != 1) {
6260 BNX2X_ERR("CFC LL_INIT failed\n");
6261 return -EBUSY;
6262 }
6263
6264 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6265 if (val != 1) {
6266 BNX2X_ERR("CFC AC_INIT failed\n");
6267 return -EBUSY;
6268 }
6269
6270 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6271 if (val != 1) {
6272 BNX2X_ERR("CFC CAM_INIT failed\n");
6273 return -EBUSY;
6274 }
6275
6276 REG_WR(bp, CFC_REG_DEBUG0, 0);
6277
6278 /* read NIG statistic
6279 to see if this is our first up since powerup */
6280#ifdef BNX2X_DMAE_RD
6281 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6282 val = *bnx2x_sp(bp, wb_data[0]);
6283#else
6284 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6285 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6286#endif
6287 /* do internal memory self test */
6288 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6289 BNX2X_ERR("internal mem selftest failed\n");
6290 return -EBUSY;
6291 }
6292
6293 /* clear PXP2 attentions */
6294 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6295
6296 enable_blocks_attention(bp);
6297 /* enable_blocks_parity(bp); */
6298
f1410647
ET
6299 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6300 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6301 /* Fan failure is indicated by SPIO 5 */
6302 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6303 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6304
6305 /* set to active low mode */
6306 val = REG_RD(bp, MISC_REG_SPIO_INT);
6307 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6308 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6309 REG_WR(bp, MISC_REG_SPIO_INT, val);
6310
6311 /* enable interrupt to signal the IGU */
6312 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6313 val |= (1 << MISC_REGISTERS_SPIO_5);
6314 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6315 break;
6316
6317 default:
6318 break;
6319 }
6320
a2fbb9ea
ET
6321 } /* end of common init */
6322
6323 /* per port init */
6324
6325 /* the phys address is shifted right 12 bits and has an added
6326 1=valid bit added to the 53rd bit
6327 then since this is a wide register(TM)
6328 we split it into two 32 bit writes
6329 */
6330#define RQ_ONCHIP_AT_PORT_SIZE 384
6331#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6332#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6333#define PXP_ONE_ILT(x) ((x << 10) | x)
6334
6335 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6336
6337 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6338
6339 /* Port PXP comes here */
6340 /* Port PXP2 comes here */
6341
6342 /* Offset is
6343 * Port0 0
6344 * Port1 384 */
6345 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6346#ifdef USE_DMAE
6347 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6348 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6349 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6350#else
6351 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6352 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6353 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6354 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6355#endif
6356 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6357
6358#ifdef BCM_ISCSI
6359 /* Port0 1
6360 * Port1 385 */
6361 i++;
6362 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6363 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6364 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6365 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6366
6367 /* Port0 2
6368 * Port1 386 */
6369 i++;
6370 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6371 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6372 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6373 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6374
6375 /* Port0 3
6376 * Port1 387 */
6377 i++;
6378 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6379 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6380 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6381 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6382#endif
6383
6384 /* Port TCM comes here */
6385 /* Port UCM comes here */
6386 /* Port CCM comes here */
6387 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6388 func ? XCM_PORT1_END : XCM_PORT0_END);
6389
6390#ifdef USE_DMAE
6391 wb_write[0] = 0;
6392 wb_write[1] = 0;
6393#endif
6394 for (i = 0; i < 32; i++) {
6395 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6396#ifdef USE_DMAE
6397 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6398#else
6399 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6400 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6401#endif
6402 }
6403 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6404
6405 /* Port QM comes here */
6406
6407#ifdef BCM_ISCSI
6408 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6409 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6410
6411 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6412 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6413#endif
6414 /* Port DQ comes here */
6415 /* Port BRB1 comes here */
6416 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6417 func ? PRS_PORT1_END : PRS_PORT0_END);
6418 /* Port TSDM comes here */
6419 /* Port CSDM comes here */
6420 /* Port USDM comes here */
6421 /* Port XSDM comes here */
6422 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6423 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6424 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6425 func ? USEM_PORT1_END : USEM_PORT0_END);
6426 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6427 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6428 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6429 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6430 /* Port UPB comes here */
6431 /* Port XSDM comes here */
6432 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6433 func ? PBF_PORT1_END : PBF_PORT0_END);
6434
6435 /* configure PBF to work without PAUSE mtu 9000 */
6436 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6437
6438 /* update threshold */
6439 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6440 /* update init credit */
6441 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
6442
6443 /* probe changes */
6444 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6445 msleep(5);
6446 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6447
6448#ifdef BCM_ISCSI
6449 /* tell the searcher where the T2 table is */
6450 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6451
6452 wb_write[0] = U64_LO(bp->t2_mapping);
6453 wb_write[1] = U64_HI(bp->t2_mapping);
6454 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6455 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6456 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6457 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6458
6459 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6460 /* Port SRCH comes here */
6461#endif
6462 /* Port CDU comes here */
6463 /* Port CFC comes here */
6464 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6465 func ? HC_PORT1_END : HC_PORT0_END);
6466 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6467 MISC_AEU_PORT0_START,
6468 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6469 /* Port PXPCS comes here */
6470 /* Port EMAC0 comes here */
6471 /* Port EMAC1 comes here */
6472 /* Port DBU comes here */
6473 /* Port DBG comes here */
6474 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6475 func ? NIG_PORT1_END : NIG_PORT0_END);
6476 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6477 /* Port MCP comes here */
6478 /* Port DMAE comes here */
6479
f1410647
ET
6480 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6481 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6482 /* add SPIO 5 to group 0 */
6483 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6484 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6485 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6486 break;
6487
6488 default:
6489 break;
6490 }
6491
a2fbb9ea
ET
6492 bnx2x_link_reset(bp);
6493
c14423fe 6494 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6495 REG_WR(bp, 0x2114, 0xffffffff);
6496 REG_WR(bp, 0x2120, 0xffffffff);
6497 REG_WR(bp, 0x2814, 0xffffffff);
6498
6499 /* !!! move to init_values.h */
6500 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6501 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6502 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6503 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6504
6505 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6506 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6507 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6508 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6509
6510 bnx2x_gunzip_end(bp);
6511
6512 if (!nomcp) {
6513 port = bp->port;
6514
6515 bp->fw_drv_pulse_wr_seq =
f1410647 6516 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
a2fbb9ea 6517 DRV_PULSE_SEQ_MASK);
f1410647 6518 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
a2fbb9ea
ET
6519 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6520 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6521 } else {
6522 bp->fw_mb = 0;
6523 }
6524
6525 return 0;
6526}
6527
c14423fe 6528/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6529static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6530{
a2fbb9ea 6531 int port = bp->port;
f1410647
ET
6532 u32 seq = ++bp->fw_seq;
6533 u32 rc = 0;
a2fbb9ea 6534
f1410647
ET
6535 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6536 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea
ET
6537
6538 /* let the FW do it's magic ... */
6539 msleep(100); /* TBD */
6540
6541 if (CHIP_REV_IS_SLOW(bp))
6542 msleep(900);
6543
f1410647 6544 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
a2fbb9ea
ET
6545 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6546
6547 /* is this a reply to our command? */
6548 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6549 rc &= FW_MSG_CODE_MASK;
f1410647 6550
a2fbb9ea
ET
6551 } else {
6552 /* FW BUG! */
6553 BNX2X_ERR("FW failed to respond!\n");
6554 bnx2x_fw_dump(bp);
6555 rc = 0;
6556 }
f1410647 6557
a2fbb9ea
ET
6558 return rc;
6559}
6560
6561static void bnx2x_free_mem(struct bnx2x *bp)
6562{
6563
6564#define BNX2X_PCI_FREE(x, y, size) \
6565 do { \
6566 if (x) { \
6567 pci_free_consistent(bp->pdev, size, x, y); \
6568 x = NULL; \
6569 y = 0; \
6570 } \
6571 } while (0)
6572
6573#define BNX2X_FREE(x) \
6574 do { \
6575 if (x) { \
6576 vfree(x); \
6577 x = NULL; \
6578 } \
6579 } while (0)
6580
6581 int i;
6582
6583 /* fastpath */
6584 for_each_queue(bp, i) {
6585
6586 /* Status blocks */
6587 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6588 bnx2x_fp(bp, i, status_blk_mapping),
6589 sizeof(struct host_status_block) +
6590 sizeof(struct eth_tx_db_data));
6591
6592 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6593 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6594 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6595 bnx2x_fp(bp, i, tx_desc_mapping),
6596 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6597
6598 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6599 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6600 bnx2x_fp(bp, i, rx_desc_mapping),
6601 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6602
6603 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6604 bnx2x_fp(bp, i, rx_comp_mapping),
6605 sizeof(struct eth_fast_path_rx_cqe) *
6606 NUM_RCQ_BD);
6607 }
6608
6609 BNX2X_FREE(bp->fp);
6610
6611 /* end of fastpath */
6612
6613 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6614 (sizeof(struct host_def_status_block)));
6615
6616 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6617 (sizeof(struct bnx2x_slowpath)));
6618
6619#ifdef BCM_ISCSI
6620 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6621 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6622 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6623 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6624#endif
6625 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6626
6627#undef BNX2X_PCI_FREE
6628#undef BNX2X_KFREE
6629}
6630
6631static int bnx2x_alloc_mem(struct bnx2x *bp)
6632{
6633
6634#define BNX2X_PCI_ALLOC(x, y, size) \
6635 do { \
6636 x = pci_alloc_consistent(bp->pdev, size, y); \
6637 if (x == NULL) \
6638 goto alloc_mem_err; \
6639 memset(x, 0, size); \
6640 } while (0)
6641
6642#define BNX2X_ALLOC(x, size) \
6643 do { \
6644 x = vmalloc(size); \
6645 if (x == NULL) \
6646 goto alloc_mem_err; \
6647 memset(x, 0, size); \
6648 } while (0)
6649
6650 int i;
6651
6652 /* fastpath */
6653 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6654
6655 for_each_queue(bp, i) {
6656 bnx2x_fp(bp, i, bp) = bp;
6657
6658 /* Status blocks */
6659 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6660 &bnx2x_fp(bp, i, status_blk_mapping),
6661 sizeof(struct host_status_block) +
6662 sizeof(struct eth_tx_db_data));
6663
6664 bnx2x_fp(bp, i, hw_tx_prods) =
6665 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6666
6667 bnx2x_fp(bp, i, tx_prods_mapping) =
6668 bnx2x_fp(bp, i, status_blk_mapping) +
6669 sizeof(struct host_status_block);
6670
6671 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6672 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6673 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6674 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6675 &bnx2x_fp(bp, i, tx_desc_mapping),
6676 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6677
6678 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6679 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6680 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6681 &bnx2x_fp(bp, i, rx_desc_mapping),
6682 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6683
6684 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6685 &bnx2x_fp(bp, i, rx_comp_mapping),
6686 sizeof(struct eth_fast_path_rx_cqe) *
6687 NUM_RCQ_BD);
6688
6689 }
6690 /* end of fastpath */
6691
6692 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6693 sizeof(struct host_def_status_block));
6694
6695 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6696 sizeof(struct bnx2x_slowpath));
6697
6698#ifdef BCM_ISCSI
6699 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6700
6701 /* Initialize T1 */
6702 for (i = 0; i < 64*1024; i += 64) {
6703 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6704 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6705 }
6706
6707 /* allocate searcher T2 table
6708 we allocate 1/4 of alloc num for T2
6709 (which is not entered into the ILT) */
6710 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6711
6712 /* Initialize T2 */
6713 for (i = 0; i < 16*1024; i += 64)
6714 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6715
c14423fe 6716 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6717 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6718
6719 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6720 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6721
6722 /* QM queues (128*MAX_CONN) */
6723 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6724#endif
6725
6726 /* Slow path ring */
6727 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6728
6729 return 0;
6730
6731alloc_mem_err:
6732 bnx2x_free_mem(bp);
6733 return -ENOMEM;
6734
6735#undef BNX2X_PCI_ALLOC
6736#undef BNX2X_ALLOC
6737}
6738
6739static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6740{
6741 int i;
6742
6743 for_each_queue(bp, i) {
6744 struct bnx2x_fastpath *fp = &bp->fp[i];
6745
6746 u16 bd_cons = fp->tx_bd_cons;
6747 u16 sw_prod = fp->tx_pkt_prod;
6748 u16 sw_cons = fp->tx_pkt_cons;
6749
6750 BUG_TRAP(fp->tx_buf_ring != NULL);
6751
6752 while (sw_cons != sw_prod) {
6753 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6754 sw_cons++;
6755 }
6756 }
6757}
6758
6759static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6760{
6761 int i, j;
6762
6763 for_each_queue(bp, j) {
6764 struct bnx2x_fastpath *fp = &bp->fp[j];
6765
6766 BUG_TRAP(fp->rx_buf_ring != NULL);
6767
6768 for (i = 0; i < NUM_RX_BD; i++) {
6769 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6770 struct sk_buff *skb = rx_buf->skb;
6771
6772 if (skb == NULL)
6773 continue;
6774
6775 pci_unmap_single(bp->pdev,
6776 pci_unmap_addr(rx_buf, mapping),
6777 bp->rx_buf_use_size,
6778 PCI_DMA_FROMDEVICE);
6779
6780 rx_buf->skb = NULL;
6781 dev_kfree_skb(skb);
6782 }
6783 }
6784}
6785
6786static void bnx2x_free_skbs(struct bnx2x *bp)
6787{
6788 bnx2x_free_tx_skbs(bp);
6789 bnx2x_free_rx_skbs(bp);
6790}
6791
6792static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6793{
6794 int i;
6795
6796 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6797 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6798 bp->msix_table[0].vector);
6799
6800 for_each_queue(bp, i) {
c14423fe 6801 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
a2fbb9ea
ET
6802 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6803 bnx2x_fp(bp, i, state));
6804
6805 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6806
6807 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6808 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6809
6810 } else
6811 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6812
6813 }
6814
6815}
6816
6817static void bnx2x_free_irq(struct bnx2x *bp)
6818{
6819
6820 if (bp->flags & USING_MSIX_FLAG) {
6821
6822 bnx2x_free_msix_irqs(bp);
6823 pci_disable_msix(bp->pdev);
6824
6825 bp->flags &= ~USING_MSIX_FLAG;
6826
6827 } else
6828 free_irq(bp->pdev->irq, bp->dev);
6829}
6830
6831static int bnx2x_enable_msix(struct bnx2x *bp)
6832{
6833
6834 int i;
6835
6836 bp->msix_table[0].entry = 0;
6837 for_each_queue(bp, i)
6838 bp->msix_table[i + 1].entry = i + 1;
6839
6840 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6841 bp->num_queues + 1)){
6842 BNX2X_ERR("failed to enable msix\n");
6843 return -1;
6844
6845 }
6846
6847 bp->flags |= USING_MSIX_FLAG;
6848
6849 return 0;
6850
6851}
6852
6853
6854static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6855{
6856
a2fbb9ea
ET
6857 int i, rc;
6858
6859 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6860
6861 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6862 bp->dev->name, bp->dev);
6863
6864 if (rc) {
6865 BNX2X_ERR("request sp irq failed\n");
6866 return -EBUSY;
6867 }
6868
6869 for_each_queue(bp, i) {
6870 rc = request_irq(bp->msix_table[i + 1].vector,
6871 bnx2x_msix_fp_int, 0,
6872 bp->dev->name, &bp->fp[i]);
6873
6874 if (rc) {
6875 BNX2X_ERR("request fp #%d irq failed\n", i);
6876 bnx2x_free_msix_irqs(bp);
6877 return -EBUSY;
6878 }
6879
6880 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6881
6882 }
6883
6884 return 0;
6885
6886}
6887
6888static int bnx2x_req_irq(struct bnx2x *bp)
6889{
6890
6891 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6892 IRQF_SHARED, bp->dev->name, bp->dev);
6893 if (!rc)
6894 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6895
6896 return rc;
6897
6898}
6899
6900/*
6901 * Init service functions
6902 */
6903
6904static void bnx2x_set_mac_addr(struct bnx2x *bp)
6905{
6906 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6907
6908 /* CAM allocation
6909 * unicasts 0-31:port0 32-63:port1
6910 * multicast 64-127:port0 128-191:port1
6911 */
6912 config->hdr.length_6b = 2;
6913 config->hdr.offset = bp->port ? 31 : 0;
6914 config->hdr.reserved0 = 0;
6915 config->hdr.reserved1 = 0;
6916
6917 /* primary MAC */
6918 config->config_table[0].cam_entry.msb_mac_addr =
6919 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6920 config->config_table[0].cam_entry.middle_mac_addr =
6921 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6922 config->config_table[0].cam_entry.lsb_mac_addr =
6923 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6924 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6925 config->config_table[0].target_table_entry.flags = 0;
6926 config->config_table[0].target_table_entry.client_id = 0;
6927 config->config_table[0].target_table_entry.vlan_id = 0;
6928
6929 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6930 config->config_table[0].cam_entry.msb_mac_addr,
6931 config->config_table[0].cam_entry.middle_mac_addr,
6932 config->config_table[0].cam_entry.lsb_mac_addr);
6933
6934 /* broadcast */
6935 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6936 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6937 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6938 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6939 config->config_table[1].target_table_entry.flags =
6940 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6941 config->config_table[1].target_table_entry.client_id = 0;
6942 config->config_table[1].target_table_entry.vlan_id = 0;
6943
6944 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6945 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6946 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6947}
6948
6949static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6950 int *state_p, int poll)
6951{
6952 /* can take a while if any port is running */
6953 int timeout = 500;
6954
c14423fe
ET
6955 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6956 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6957
6958 might_sleep();
6959
6960 while (timeout) {
6961
6962 if (poll) {
6963 bnx2x_rx_int(bp->fp, 10);
6964 /* If index is different from 0
6965 * The reply for some commands will
6966 * be on the none default queue
6967 */
6968 if (idx)
6969 bnx2x_rx_int(&bp->fp[idx], 10);
6970 }
6971
6972 mb(); /* state is changed by bnx2x_sp_event()*/
6973
49d66772 6974 if (*state_p == state)
a2fbb9ea
ET
6975 return 0;
6976
6977 timeout--;
6978 msleep(1);
6979
6980 }
6981
a2fbb9ea 6982 /* timeout! */
49d66772
ET
6983 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6984 poll ? "polling" : "waiting", state, idx);
a2fbb9ea 6985
49d66772 6986 return -EBUSY;
a2fbb9ea
ET
6987}
6988
6989static int bnx2x_setup_leading(struct bnx2x *bp)
6990{
6991
c14423fe 6992 /* reset IGU state */
a2fbb9ea
ET
6993 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6994
6995 /* SETUP ramrod */
6996 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6997
6998 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6999
7000}
7001
7002static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7003{
7004
7005 /* reset IGU state */
7006 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7007
7008 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7009 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7010
7011 /* Wait for completion */
7012 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7013 &(bp->fp[index].state), 1);
7014
7015}
7016
7017
7018static int bnx2x_poll(struct napi_struct *napi, int budget);
7019static void bnx2x_set_rx_mode(struct net_device *dev);
7020
7021static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7022{
7023 int rc;
7024 int i = 0;
7025
7026 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7027
7028 /* Send LOAD_REQUEST command to MCP.
7029 Returns the type of LOAD command: if it is the
7030 first port to be initialized common blocks should be
7031 initialized, otherwise - not.
7032 */
7033 if (!nomcp) {
7034 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7035 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7036 return -EBUSY; /* other port in diagnostic mode */
7037 }
7038 } else {
7039 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7040 }
7041
a2fbb9ea
ET
7042 /* if we can't use msix we only need one fp,
7043 * so try to enable msix with the requested number of fp's
7044 * and fallback to inta with one fp
7045 */
7046 if (req_irq) {
a2fbb9ea
ET
7047 if (use_inta) {
7048 bp->num_queues = 1;
7049 } else {
c14423fe 7050 if ((use_multi > 1) && (use_multi <= 16))
a2fbb9ea
ET
7051 /* user requested number */
7052 bp->num_queues = use_multi;
7053 else if (use_multi == 1)
7054 bp->num_queues = num_online_cpus();
7055 else
7056 bp->num_queues = 1;
7057
7058 if (bnx2x_enable_msix(bp)) {
c14423fe 7059 /* failed to enable msix */
a2fbb9ea
ET
7060 bp->num_queues = 1;
7061 if (use_multi)
c14423fe 7062 BNX2X_ERR("Multi requested but failed"
a2fbb9ea
ET
7063 " to enable MSI-X\n");
7064 }
7065 }
7066 }
7067
c14423fe
ET
7068 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7069
a2fbb9ea
ET
7070 if (bnx2x_alloc_mem(bp))
7071 return -ENOMEM;
7072
7073 if (req_irq) {
7074 if (bp->flags & USING_MSIX_FLAG) {
7075 if (bnx2x_req_msix_irqs(bp)) {
7076 pci_disable_msix(bp->pdev);
7077 goto out_error;
7078 }
7079
7080 } else {
7081 if (bnx2x_req_irq(bp)) {
7082 BNX2X_ERR("IRQ request failed, aborting\n");
7083 goto out_error;
7084 }
7085 }
7086 }
7087
7088 for_each_queue(bp, i)
7089 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7090 bnx2x_poll, 128);
7091
7092
7093 /* Initialize HW */
7094 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7095 BNX2X_ERR("HW init failed, aborting\n");
7096 goto out_error;
7097 }
7098
7099
7100 atomic_set(&bp->intr_sem, 0);
7101
a2fbb9ea
ET
7102
7103 /* Setup NIC internals and enable interrupts */
7104 bnx2x_nic_init(bp);
7105
7106 /* Send LOAD_DONE command to MCP */
7107 if (!nomcp) {
7108 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7109 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7110 if (!rc) {
7111 BNX2X_ERR("MCP response failure, unloading\n");
7112 goto int_disable;
7113 }
7114 }
7115
7116 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7117
7118 /* Enable Rx interrupt handling before sending the ramrod
7119 as it's completed on Rx FP queue */
7120 for_each_queue(bp, i)
7121 napi_enable(&bnx2x_fp(bp, i, napi));
7122
7123 if (bnx2x_setup_leading(bp))
7124 goto stop_netif;
7125
7126 for_each_nondefault_queue(bp, i)
7127 if (bnx2x_setup_multi(bp, i))
7128 goto stop_netif;
7129
7130 bnx2x_set_mac_addr(bp);
7131
7132 bnx2x_phy_init(bp);
7133
7134 /* Start fast path */
7135 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7136 netif_start_queue(bp->dev);
7137 if (bp->flags & USING_MSIX_FLAG)
7138 printk(KERN_INFO PFX "%s: using MSI-X\n",
7139 bp->dev->name);
7140
7141 /* Otherwise Tx queue should be only reenabled */
7142 } else if (netif_running(bp->dev)) {
7143 netif_wake_queue(bp->dev);
7144 bnx2x_set_rx_mode(bp->dev);
7145 }
7146
7147 /* start the timer */
7148 mod_timer(&bp->timer, jiffies + bp->current_interval);
7149
7150 return 0;
7151
7152stop_netif:
7153 for_each_queue(bp, i)
7154 napi_disable(&bnx2x_fp(bp, i, napi));
7155
7156int_disable:
7157 bnx2x_disable_int_sync(bp);
7158
7159 bnx2x_free_skbs(bp);
7160 bnx2x_free_irq(bp);
7161
7162out_error:
7163 bnx2x_free_mem(bp);
7164
7165 /* TBD we really need to reset the chip
7166 if we want to recover from this */
7167 return rc;
7168}
7169
7170static void bnx2x_netif_stop(struct bnx2x *bp)
7171{
7172 int i;
7173
7174 bp->rx_mode = BNX2X_RX_MODE_NONE;
7175 bnx2x_set_storm_rx_mode(bp);
7176
7177 bnx2x_disable_int_sync(bp);
7178 bnx2x_link_reset(bp);
7179
7180 for_each_queue(bp, i)
7181 napi_disable(&bnx2x_fp(bp, i, napi));
7182
7183 if (netif_running(bp->dev)) {
7184 netif_tx_disable(bp->dev);
7185 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7186 }
7187}
7188
7189static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7190{
7191 int port = bp->port;
7192#ifdef USE_DMAE
7193 u32 wb_write[2];
7194#endif
7195 int base, i;
7196
7197 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7198
7199 /* Do not rcv packets to BRB */
7200 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7201 /* Do not direct rcv packets that are not for MCP to the BRB */
7202 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7203 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7204
7205 /* Configure IGU and AEU */
7206 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7207 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7208
7209 /* TODO: Close Doorbell port? */
7210
7211 /* Clear ILT */
7212#ifdef USE_DMAE
7213 wb_write[0] = 0;
7214 wb_write[1] = 0;
7215#endif
7216 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7217 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7218#ifdef USE_DMAE
7219 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7220#else
7221 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7222 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7223#endif
7224 }
7225
7226 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7227 /* reset_common */
7228 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7229 0xd3ffff7f);
7230 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7231 0x1403);
7232 }
7233}
7234
7235static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7236{
7237
7238 int rc;
7239
c14423fe 7240 /* halt the connection */
a2fbb9ea
ET
7241 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7242 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7243
7244
7245 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7246 &(bp->fp[index].state), 1);
c14423fe 7247 if (rc) /* timeout */
a2fbb9ea
ET
7248 return rc;
7249
7250 /* delete cfc entry */
7251 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7252
49d66772 7253 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
a2fbb9ea
ET
7254 &(bp->fp[index].state), 1);
7255
7256}
7257
7258
7259static void bnx2x_stop_leading(struct bnx2x *bp)
7260{
49d66772 7261 u16 dsb_sp_prod_idx;
c14423fe 7262 /* if the other port is handling traffic,
a2fbb9ea
ET
7263 this can take a lot of time */
7264 int timeout = 500;
7265
7266 might_sleep();
7267
7268 /* Send HALT ramrod */
7269 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7270 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7271
7272 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7273 &(bp->fp[0].state), 1))
7274 return;
7275
49d66772 7276 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea
ET
7277
7278 /* Send CFC_DELETE ramrod */
7279 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7280
49d66772 7281 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7282 we are going to reset the chip anyway
7283 so there is not much to do if this times out
7284 */
49d66772
ET
7285 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7286 timeout--;
7287 msleep(1);
a2fbb9ea 7288 }
49d66772
ET
7289 if (!timeout) {
7290 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7291 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7292 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7293 }
7294 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7295 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
7296}
7297
49d66772 7298
a2fbb9ea
ET
7299static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
7300{
7301 u32 reset_code = 0;
7302 int rc;
7303 int i;
7304
7305 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7306
7307 /* Calling flush_scheduled_work() may deadlock because
7308 * linkwatch_event() may be on the workqueue and it will try to get
7309 * the rtnl_lock which we are holding.
7310 */
7311
7312 while (bp->in_reset_task)
7313 msleep(1);
7314
7315 /* Delete the timer: do it before disabling interrupts, as it
c14423fe 7316 may be still STAT_QUERY ramrod pending after stopping the timer */
a2fbb9ea
ET
7317 del_timer_sync(&bp->timer);
7318
7319 /* Wait until stat ramrod returns and all SP tasks complete */
7320 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7321 msleep(1);
7322
7323 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7324 bnx2x_netif_stop(bp);
7325
7326 if (bp->flags & NO_WOL_FLAG)
7327 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7328 else if (bp->wol) {
7329 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
7330 u8 *mac_addr = bp->dev->dev_addr;
7331 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7332 EMAC_MODE_ACPI_RCVD);
7333
7334 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7335
7336 val = (mac_addr[0] << 8) | mac_addr[1];
7337 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7338
7339 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7340 (mac_addr[4] << 8) | mac_addr[5];
7341 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
7342
7343 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7344 } else
7345 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7346
7347 for_each_nondefault_queue(bp, i)
7348 if (bnx2x_stop_multi(bp, i))
7349 goto error;
7350
7351
7352 bnx2x_stop_leading(bp);
7353
7354error:
7355 if (!nomcp)
7356 rc = bnx2x_fw_command(bp, reset_code);
7357 else
7358 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7359
7360 /* Release IRQs */
7361 if (fre_irq)
7362 bnx2x_free_irq(bp);
7363
7364 /* Reset the chip */
7365 bnx2x_reset_chip(bp, rc);
7366
7367 /* Report UNLOAD_DONE to MCP */
7368 if (!nomcp)
7369 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7370
7371 /* Free SKBs and driver internals */
7372 bnx2x_free_skbs(bp);
7373 bnx2x_free_mem(bp);
7374
7375 bp->state = BNX2X_STATE_CLOSED;
7376 /* Set link down */
7377 bp->link_up = 0;
7378 netif_carrier_off(bp->dev);
7379
7380 return 0;
7381}
7382
7383/* end of nic load/unload */
7384
7385/* ethtool_ops */
7386
7387/*
7388 * Init service functions
7389 */
7390
7391static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7392{
7393 int port = bp->port;
7394 u32 ext_phy_type;
7395
7396 bp->phy_flags = 0;
7397
7398 switch (switch_cfg) {
7399 case SWITCH_CFG_1G:
7400 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7401
7402 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7403 switch (ext_phy_type) {
7404 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7405 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7406 ext_phy_type);
7407
7408 bp->supported |= (SUPPORTED_10baseT_Half |
7409 SUPPORTED_10baseT_Full |
7410 SUPPORTED_100baseT_Half |
7411 SUPPORTED_100baseT_Full |
7412 SUPPORTED_1000baseT_Full |
f1410647 7413 SUPPORTED_2500baseX_Full |
a2fbb9ea
ET
7414 SUPPORTED_TP | SUPPORTED_FIBRE |
7415 SUPPORTED_Autoneg |
7416 SUPPORTED_Pause |
7417 SUPPORTED_Asym_Pause);
7418 break;
7419
7420 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7421 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7422 ext_phy_type);
7423
7424 bp->phy_flags |= PHY_SGMII_FLAG;
7425
f1410647
ET
7426 bp->supported |= (SUPPORTED_10baseT_Half |
7427 SUPPORTED_10baseT_Full |
7428 SUPPORTED_100baseT_Half |
7429 SUPPORTED_100baseT_Full |
a2fbb9ea
ET
7430 SUPPORTED_1000baseT_Full |
7431 SUPPORTED_TP | SUPPORTED_FIBRE |
7432 SUPPORTED_Autoneg |
7433 SUPPORTED_Pause |
7434 SUPPORTED_Asym_Pause);
7435 break;
7436
7437 default:
7438 BNX2X_ERR("NVRAM config error. "
7439 "BAD SerDes ext_phy_config 0x%x\n",
7440 bp->ext_phy_config);
7441 return;
7442 }
7443
7444 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7445 port*0x10);
7446 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7447 break;
7448
7449 case SWITCH_CFG_10G:
7450 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7451
7452 bp->phy_flags |= PHY_XGXS_FLAG;
7453
7454 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7455 switch (ext_phy_type) {
7456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7457 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7458 ext_phy_type);
7459
7460 bp->supported |= (SUPPORTED_10baseT_Half |
7461 SUPPORTED_10baseT_Full |
7462 SUPPORTED_100baseT_Half |
7463 SUPPORTED_100baseT_Full |
7464 SUPPORTED_1000baseT_Full |
f1410647 7465 SUPPORTED_2500baseX_Full |
a2fbb9ea
ET
7466 SUPPORTED_10000baseT_Full |
7467 SUPPORTED_TP | SUPPORTED_FIBRE |
7468 SUPPORTED_Autoneg |
7469 SUPPORTED_Pause |
7470 SUPPORTED_Asym_Pause);
7471 break;
7472
7473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647
ET
7474 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7475 ext_phy_type);
7476
7477 bp->supported |= (SUPPORTED_10000baseT_Full |
7478 SUPPORTED_FIBRE |
7479 SUPPORTED_Pause |
7480 SUPPORTED_Asym_Pause);
7481 break;
7482
a2fbb9ea 7483 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7484 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7485 ext_phy_type);
7486
7487 bp->supported |= (SUPPORTED_10000baseT_Full |
7488 SUPPORTED_1000baseT_Full |
7489 SUPPORTED_Autoneg |
7490 SUPPORTED_FIBRE |
7491 SUPPORTED_Pause |
7492 SUPPORTED_Asym_Pause);
7493 break;
7494
7495 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7496 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7497 ext_phy_type);
7498
7499 bp->supported |= (SUPPORTED_10000baseT_Full |
f1410647 7500 SUPPORTED_1000baseT_Full |
a2fbb9ea 7501 SUPPORTED_FIBRE |
f1410647
ET
7502 SUPPORTED_Autoneg |
7503 SUPPORTED_Pause |
7504 SUPPORTED_Asym_Pause);
7505 break;
7506
7507 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7508 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7509 ext_phy_type);
7510
7511 bp->supported |= (SUPPORTED_10000baseT_Full |
7512 SUPPORTED_TP |
7513 SUPPORTED_Autoneg |
a2fbb9ea
ET
7514 SUPPORTED_Pause |
7515 SUPPORTED_Asym_Pause);
7516 break;
7517
7518 default:
7519 BNX2X_ERR("NVRAM config error. "
7520 "BAD XGXS ext_phy_config 0x%x\n",
7521 bp->ext_phy_config);
7522 return;
7523 }
7524
7525 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7526 port*0x18);
7527 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7528
7529 bp->ser_lane = ((bp->lane_config &
7530 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7531 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7532 bp->rx_lane_swap = ((bp->lane_config &
7533 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7534 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7535 bp->tx_lane_swap = ((bp->lane_config &
7536 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7537 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7538 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7539 bp->rx_lane_swap, bp->tx_lane_swap);
7540 break;
7541
7542 default:
7543 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7544 bp->link_config);
7545 return;
7546 }
7547
7548 /* mask what we support according to speed_cap_mask */
7549 if (!(bp->speed_cap_mask &
7550 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7551 bp->supported &= ~SUPPORTED_10baseT_Half;
7552
7553 if (!(bp->speed_cap_mask &
7554 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7555 bp->supported &= ~SUPPORTED_10baseT_Full;
7556
7557 if (!(bp->speed_cap_mask &
7558 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7559 bp->supported &= ~SUPPORTED_100baseT_Half;
7560
7561 if (!(bp->speed_cap_mask &
7562 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7563 bp->supported &= ~SUPPORTED_100baseT_Full;
7564
7565 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7566 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7567 SUPPORTED_1000baseT_Full);
7568
7569 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
f1410647 7570 bp->supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea
ET
7571
7572 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7573 bp->supported &= ~SUPPORTED_10000baseT_Full;
7574
7575 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
7576}
7577
7578static void bnx2x_link_settings_requested(struct bnx2x *bp)
7579{
7580 bp->req_autoneg = 0;
7581 bp->req_duplex = DUPLEX_FULL;
7582
7583 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7584 case PORT_FEATURE_LINK_SPEED_AUTO:
7585 if (bp->supported & SUPPORTED_Autoneg) {
7586 bp->req_autoneg |= AUTONEG_SPEED;
7587 bp->req_line_speed = 0;
7588 bp->advertising = bp->supported;
7589 } else {
f1410647
ET
7590 if (XGXS_EXT_PHY_TYPE(bp) ==
7591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
a2fbb9ea
ET
7592 /* force 10G, no AN */
7593 bp->req_line_speed = SPEED_10000;
7594 bp->advertising =
7595 (ADVERTISED_10000baseT_Full |
7596 ADVERTISED_FIBRE);
7597 break;
7598 }
7599 BNX2X_ERR("NVRAM config error. "
7600 "Invalid link_config 0x%x"
7601 " Autoneg not supported\n",
7602 bp->link_config);
7603 return;
7604 }
7605 break;
7606
7607 case PORT_FEATURE_LINK_SPEED_10M_FULL:
f1410647 7608 if (bp->supported & SUPPORTED_10baseT_Full) {
a2fbb9ea
ET
7609 bp->req_line_speed = SPEED_10;
7610 bp->advertising = (ADVERTISED_10baseT_Full |
7611 ADVERTISED_TP);
7612 } else {
7613 BNX2X_ERR("NVRAM config error. "
7614 "Invalid link_config 0x%x"
7615 " speed_cap_mask 0x%x\n",
7616 bp->link_config, bp->speed_cap_mask);
7617 return;
7618 }
7619 break;
7620
7621 case PORT_FEATURE_LINK_SPEED_10M_HALF:
f1410647 7622 if (bp->supported & SUPPORTED_10baseT_Half) {
a2fbb9ea
ET
7623 bp->req_line_speed = SPEED_10;
7624 bp->req_duplex = DUPLEX_HALF;
7625 bp->advertising = (ADVERTISED_10baseT_Half |
7626 ADVERTISED_TP);
7627 } else {
7628 BNX2X_ERR("NVRAM config error. "
7629 "Invalid link_config 0x%x"
7630 " speed_cap_mask 0x%x\n",
7631 bp->link_config, bp->speed_cap_mask);
7632 return;
7633 }
7634 break;
7635
7636 case PORT_FEATURE_LINK_SPEED_100M_FULL:
f1410647 7637 if (bp->supported & SUPPORTED_100baseT_Full) {
a2fbb9ea
ET
7638 bp->req_line_speed = SPEED_100;
7639 bp->advertising = (ADVERTISED_100baseT_Full |
7640 ADVERTISED_TP);
7641 } else {
7642 BNX2X_ERR("NVRAM config error. "
7643 "Invalid link_config 0x%x"
7644 " speed_cap_mask 0x%x\n",
7645 bp->link_config, bp->speed_cap_mask);
7646 return;
7647 }
7648 break;
7649
7650 case PORT_FEATURE_LINK_SPEED_100M_HALF:
f1410647 7651 if (bp->supported & SUPPORTED_100baseT_Half) {
a2fbb9ea
ET
7652 bp->req_line_speed = SPEED_100;
7653 bp->req_duplex = DUPLEX_HALF;
7654 bp->advertising = (ADVERTISED_100baseT_Half |
7655 ADVERTISED_TP);
7656 } else {
7657 BNX2X_ERR("NVRAM config error. "
7658 "Invalid link_config 0x%x"
7659 " speed_cap_mask 0x%x\n",
7660 bp->link_config, bp->speed_cap_mask);
7661 return;
7662 }
7663 break;
7664
7665 case PORT_FEATURE_LINK_SPEED_1G:
f1410647 7666 if (bp->supported & SUPPORTED_1000baseT_Full) {
a2fbb9ea
ET
7667 bp->req_line_speed = SPEED_1000;
7668 bp->advertising = (ADVERTISED_1000baseT_Full |
7669 ADVERTISED_TP);
7670 } else {
7671 BNX2X_ERR("NVRAM config error. "
7672 "Invalid link_config 0x%x"
7673 " speed_cap_mask 0x%x\n",
7674 bp->link_config, bp->speed_cap_mask);
7675 return;
7676 }
7677 break;
7678
7679 case PORT_FEATURE_LINK_SPEED_2_5G:
f1410647 7680 if (bp->supported & SUPPORTED_2500baseX_Full) {
a2fbb9ea 7681 bp->req_line_speed = SPEED_2500;
f1410647 7682 bp->advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7683 ADVERTISED_TP);
7684 } else {
7685 BNX2X_ERR("NVRAM config error. "
7686 "Invalid link_config 0x%x"
7687 " speed_cap_mask 0x%x\n",
7688 bp->link_config, bp->speed_cap_mask);
7689 return;
7690 }
7691 break;
7692
7693 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7694 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7695 case PORT_FEATURE_LINK_SPEED_10G_KR:
f1410647 7696 if (bp->supported & SUPPORTED_10000baseT_Full) {
a2fbb9ea
ET
7697 bp->req_line_speed = SPEED_10000;
7698 bp->advertising = (ADVERTISED_10000baseT_Full |
7699 ADVERTISED_FIBRE);
7700 } else {
7701 BNX2X_ERR("NVRAM config error. "
7702 "Invalid link_config 0x%x"
7703 " speed_cap_mask 0x%x\n",
7704 bp->link_config, bp->speed_cap_mask);
7705 return;
7706 }
7707 break;
7708
7709 default:
7710 BNX2X_ERR("NVRAM config error. "
7711 "BAD link speed link_config 0x%x\n",
7712 bp->link_config);
7713 bp->req_autoneg |= AUTONEG_SPEED;
7714 bp->req_line_speed = 0;
7715 bp->advertising = bp->supported;
7716 break;
7717 }
7718 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7719 bp->req_line_speed, bp->req_duplex);
7720
7721 bp->req_flow_ctrl = (bp->link_config &
7722 PORT_FEATURE_FLOW_CONTROL_MASK);
f1410647
ET
7723 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7724 (bp->supported & SUPPORTED_Autoneg))
a2fbb9ea 7725 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
a2fbb9ea 7726
f1410647
ET
7727 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7728 " advertising 0x%x\n",
7729 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
a2fbb9ea
ET
7730}
7731
7732static void bnx2x_get_hwinfo(struct bnx2x *bp)
7733{
7734 u32 val, val2, val3, val4, id;
7735 int port = bp->port;
7736 u32 switch_cfg;
7737
7738 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7739 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7740
7741 /* Get the chip revision id and number. */
7742 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7743 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7744 id = ((val & 0xffff) << 16);
7745 val = REG_RD(bp, MISC_REG_CHIP_REV);
7746 id |= ((val & 0xf) << 12);
7747 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7748 id |= ((val & 0xff) << 4);
7749 REG_RD(bp, MISC_REG_BOND_ID);
7750 id |= (val & 0xf);
7751 bp->chip_id = id;
7752 BNX2X_DEV_INFO("chip ID is %x\n", id);
7753
7754 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7755 BNX2X_DEV_INFO("MCP not active\n");
7756 nomcp = 1;
7757 goto set_mac;
7758 }
7759
7760 val = SHMEM_RD(bp, validity_map[port]);
7761 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f1410647
ET
7762 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7763 BNX2X_ERR("BAD MCP validity signature\n");
a2fbb9ea 7764
f1410647 7765 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
a2fbb9ea
ET
7766 DRV_MSG_SEQ_NUMBER_MASK);
7767
7768 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
f1410647 7769 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
a2fbb9ea 7770 bp->serdes_config =
f1410647 7771 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
a2fbb9ea
ET
7772 bp->lane_config =
7773 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7774 bp->ext_phy_config =
7775 SHMEM_RD(bp,
7776 dev_info.port_hw_config[port].external_phy_config);
7777 bp->speed_cap_mask =
7778 SHMEM_RD(bp,
7779 dev_info.port_hw_config[port].speed_capability_mask);
7780
7781 bp->link_config =
7782 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7783
f1410647 7784 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
a2fbb9ea
ET
7785 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7786 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7787 " fw_seq (%08x)\n",
f1410647
ET
7788 bp->hw_config, bp->board, bp->serdes_config,
7789 bp->lane_config, bp->ext_phy_config,
7790 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
a2fbb9ea
ET
7791
7792 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7793 bnx2x_link_settings_supported(bp, switch_cfg);
7794
7795 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7796 /* for now disable cl73 */
7797 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7798 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7799
7800 bnx2x_link_settings_requested(bp);
7801
7802 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7803 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7804 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7805 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7806 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7807 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7808 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7809 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7810
7811 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7812
7813
7814 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7815 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7816 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7817 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7818
7819 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7820 val, val2, val3, val4);
7821
7822 /* bc ver */
7823 if (!nomcp) {
7824 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7825 BNX2X_DEV_INFO("bc_ver %X\n", val);
7826 if (val < BNX2X_BC_VER) {
7827 /* for now only warn
7828 * later we might need to enforce this */
7829 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7830 " please upgrade BC\n", BNX2X_BC_VER, val);
7831 }
7832 } else {
7833 bp->bc_ver = 0;
7834 }
7835
7836 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7837 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7838 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7839 bp->flash_size, bp->flash_size);
7840
7841 return;
7842
7843set_mac: /* only supposed to happen on emulation/FPGA */
f1410647
ET
7844 BNX2X_ERR("warning rendom MAC workaround active\n");
7845 random_ether_addr(bp->dev->dev_addr);
a2fbb9ea
ET
7846 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7847
7848}
7849
7850/*
7851 * ethtool service functions
7852 */
7853
7854/* All ethtool functions called with rtnl_lock */
7855
7856static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7857{
7858 struct bnx2x *bp = netdev_priv(dev);
7859
7860 cmd->supported = bp->supported;
7861 cmd->advertising = bp->advertising;
7862
7863 if (netif_carrier_ok(dev)) {
7864 cmd->speed = bp->line_speed;
7865 cmd->duplex = bp->duplex;
7866 } else {
7867 cmd->speed = bp->req_line_speed;
7868 cmd->duplex = bp->req_duplex;
7869 }
7870
7871 if (bp->phy_flags & PHY_XGXS_FLAG) {
f1410647
ET
7872 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7873
7874 switch (ext_phy_type) {
7875 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7876 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7877 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7879 cmd->port = PORT_FIBRE;
7880 break;
7881
7882 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7883 cmd->port = PORT_TP;
7884 break;
7885
7886 default:
7887 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7888 bp->ext_phy_config);
7889 }
7890 } else
a2fbb9ea 7891 cmd->port = PORT_TP;
a2fbb9ea
ET
7892
7893 cmd->phy_address = bp->phy_addr;
7894 cmd->transceiver = XCVR_INTERNAL;
7895
f1410647 7896 if (bp->req_autoneg & AUTONEG_SPEED)
a2fbb9ea 7897 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7898 else
a2fbb9ea 7899 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7900
7901 cmd->maxtxpkt = 0;
7902 cmd->maxrxpkt = 0;
7903
7904 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7905 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7906 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7907 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7908 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7909 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7910 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7911
7912 return 0;
7913}
7914
7915static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7916{
7917 struct bnx2x *bp = netdev_priv(dev);
7918 u32 advertising;
7919
7920 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7921 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7922 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7923 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7924 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7925 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7926 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7927
7928 switch (cmd->port) {
7929 case PORT_TP:
f1410647
ET
7930 if (!(bp->supported & SUPPORTED_TP)) {
7931 DP(NETIF_MSG_LINK, "TP not supported\n");
a2fbb9ea 7932 return -EINVAL;
f1410647 7933 }
a2fbb9ea
ET
7934
7935 if (bp->phy_flags & PHY_XGXS_FLAG) {
7936 bnx2x_link_reset(bp);
7937 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7938 bnx2x_phy_deassert(bp);
7939 }
7940 break;
7941
7942 case PORT_FIBRE:
f1410647
ET
7943 if (!(bp->supported & SUPPORTED_FIBRE)) {
7944 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
a2fbb9ea 7945 return -EINVAL;
f1410647 7946 }
a2fbb9ea
ET
7947
7948 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7949 bnx2x_link_reset(bp);
7950 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7951 bnx2x_phy_deassert(bp);
7952 }
7953 break;
7954
7955 default:
f1410647 7956 DP(NETIF_MSG_LINK, "Unknown port type\n");
a2fbb9ea
ET
7957 return -EINVAL;
7958 }
7959
7960 if (cmd->autoneg == AUTONEG_ENABLE) {
f1410647
ET
7961 if (!(bp->supported & SUPPORTED_Autoneg)) {
7962 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
a2fbb9ea 7963 return -EINVAL;
f1410647 7964 }
a2fbb9ea
ET
7965
7966 /* advertise the requested speed and duplex if supported */
7967 cmd->advertising &= bp->supported;
7968
7969 bp->req_autoneg |= AUTONEG_SPEED;
7970 bp->req_line_speed = 0;
7971 bp->req_duplex = DUPLEX_FULL;
7972 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7973
7974 } else { /* forced speed */
7975 /* advertise the requested speed and duplex if supported */
7976 switch (cmd->speed) {
7977 case SPEED_10:
7978 if (cmd->duplex == DUPLEX_FULL) {
f1410647
ET
7979 if (!(bp->supported &
7980 SUPPORTED_10baseT_Full)) {
7981 DP(NETIF_MSG_LINK,
7982 "10M full not supported\n");
a2fbb9ea 7983 return -EINVAL;
f1410647 7984 }
a2fbb9ea
ET
7985
7986 advertising = (ADVERTISED_10baseT_Full |
7987 ADVERTISED_TP);
7988 } else {
f1410647
ET
7989 if (!(bp->supported &
7990 SUPPORTED_10baseT_Half)) {
7991 DP(NETIF_MSG_LINK,
7992 "10M half not supported\n");
a2fbb9ea 7993 return -EINVAL;
f1410647 7994 }
a2fbb9ea
ET
7995
7996 advertising = (ADVERTISED_10baseT_Half |
7997 ADVERTISED_TP);
7998 }
7999 break;
8000
8001 case SPEED_100:
8002 if (cmd->duplex == DUPLEX_FULL) {
8003 if (!(bp->supported &
f1410647
ET
8004 SUPPORTED_100baseT_Full)) {
8005 DP(NETIF_MSG_LINK,
8006 "100M full not supported\n");
a2fbb9ea 8007 return -EINVAL;
f1410647 8008 }
a2fbb9ea
ET
8009
8010 advertising = (ADVERTISED_100baseT_Full |
8011 ADVERTISED_TP);
8012 } else {
8013 if (!(bp->supported &
f1410647
ET
8014 SUPPORTED_100baseT_Half)) {
8015 DP(NETIF_MSG_LINK,
8016 "100M half not supported\n");
a2fbb9ea 8017 return -EINVAL;
f1410647 8018 }
a2fbb9ea
ET
8019
8020 advertising = (ADVERTISED_100baseT_Half |
8021 ADVERTISED_TP);
8022 }
8023 break;
8024
8025 case SPEED_1000:
f1410647
ET
8026 if (cmd->duplex != DUPLEX_FULL) {
8027 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8028 return -EINVAL;
f1410647 8029 }
a2fbb9ea 8030
f1410647
ET
8031 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8032 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8033 return -EINVAL;
f1410647 8034 }
a2fbb9ea
ET
8035
8036 advertising = (ADVERTISED_1000baseT_Full |
8037 ADVERTISED_TP);
8038 break;
8039
8040 case SPEED_2500:
f1410647
ET
8041 if (cmd->duplex != DUPLEX_FULL) {
8042 DP(NETIF_MSG_LINK,
8043 "2.5G half not supported\n");
a2fbb9ea 8044 return -EINVAL;
f1410647 8045 }
a2fbb9ea 8046
f1410647
ET
8047 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8048 DP(NETIF_MSG_LINK,
8049 "2.5G full not supported\n");
a2fbb9ea 8050 return -EINVAL;
f1410647 8051 }
a2fbb9ea 8052
f1410647 8053 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8054 ADVERTISED_TP);
8055 break;
8056
8057 case SPEED_10000:
f1410647
ET
8058 if (cmd->duplex != DUPLEX_FULL) {
8059 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8060 return -EINVAL;
f1410647 8061 }
a2fbb9ea 8062
f1410647
ET
8063 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8064 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8065 return -EINVAL;
f1410647 8066 }
a2fbb9ea
ET
8067
8068 advertising = (ADVERTISED_10000baseT_Full |
8069 ADVERTISED_FIBRE);
8070 break;
8071
8072 default:
f1410647 8073 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8074 return -EINVAL;
8075 }
8076
8077 bp->req_autoneg &= ~AUTONEG_SPEED;
8078 bp->req_line_speed = cmd->speed;
8079 bp->req_duplex = cmd->duplex;
8080 bp->advertising = advertising;
8081 }
8082
8083 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8084 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8085 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8086 bp->advertising);
8087
8088 bnx2x_stop_stats(bp);
8089 bnx2x_link_initialize(bp);
8090
8091 return 0;
8092}
8093
8094static void bnx2x_get_drvinfo(struct net_device *dev,
8095 struct ethtool_drvinfo *info)
8096{
8097 struct bnx2x *bp = netdev_priv(dev);
8098
8099 strcpy(info->driver, DRV_MODULE_NAME);
8100 strcpy(info->version, DRV_MODULE_VERSION);
8101 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8102 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8103 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8104 bp->bc_ver);
8105 strcpy(info->bus_info, pci_name(bp->pdev));
8106 info->n_stats = BNX2X_NUM_STATS;
8107 info->testinfo_len = BNX2X_NUM_TESTS;
8108 info->eedump_len = bp->flash_size;
8109 info->regdump_len = 0;
8110}
8111
8112static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8113{
8114 struct bnx2x *bp = netdev_priv(dev);
8115
8116 if (bp->flags & NO_WOL_FLAG) {
8117 wol->supported = 0;
8118 wol->wolopts = 0;
8119 } else {
8120 wol->supported = WAKE_MAGIC;
8121 if (bp->wol)
8122 wol->wolopts = WAKE_MAGIC;
8123 else
8124 wol->wolopts = 0;
8125 }
8126 memset(&wol->sopass, 0, sizeof(wol->sopass));
8127}
8128
8129static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8130{
8131 struct bnx2x *bp = netdev_priv(dev);
8132
8133 if (wol->wolopts & ~WAKE_MAGIC)
8134 return -EINVAL;
8135
8136 if (wol->wolopts & WAKE_MAGIC) {
8137 if (bp->flags & NO_WOL_FLAG)
8138 return -EINVAL;
8139
8140 bp->wol = 1;
8141 } else {
8142 bp->wol = 0;
8143 }
8144 return 0;
8145}
8146
8147static u32 bnx2x_get_msglevel(struct net_device *dev)
8148{
8149 struct bnx2x *bp = netdev_priv(dev);
8150
8151 return bp->msglevel;
8152}
8153
8154static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8155{
8156 struct bnx2x *bp = netdev_priv(dev);
8157
8158 if (capable(CAP_NET_ADMIN))
8159 bp->msglevel = level;
8160}
8161
8162static int bnx2x_nway_reset(struct net_device *dev)
8163{
8164 struct bnx2x *bp = netdev_priv(dev);
8165
8166 if (bp->state != BNX2X_STATE_OPEN) {
8167 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8168 return -EAGAIN;
8169 }
8170
8171 bnx2x_stop_stats(bp);
8172 bnx2x_link_initialize(bp);
8173
8174 return 0;
8175}
8176
8177static int bnx2x_get_eeprom_len(struct net_device *dev)
8178{
8179 struct bnx2x *bp = netdev_priv(dev);
8180
8181 return bp->flash_size;
8182}
8183
8184static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8185{
8186 int port = bp->port;
8187 int count, i;
8188 u32 val = 0;
8189
8190 /* adjust timeout for emulation/FPGA */
8191 count = NVRAM_TIMEOUT_COUNT;
8192 if (CHIP_REV_IS_SLOW(bp))
8193 count *= 100;
8194
8195 /* request access to nvram interface */
8196 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8197 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8198
8199 for (i = 0; i < count*10; i++) {
8200 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8201 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8202 break;
8203
8204 udelay(5);
8205 }
8206
8207 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8208 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8209 return -EBUSY;
8210 }
8211
8212 return 0;
8213}
8214
8215static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8216{
8217 int port = bp->port;
8218 int count, i;
8219 u32 val = 0;
8220
8221 /* adjust timeout for emulation/FPGA */
8222 count = NVRAM_TIMEOUT_COUNT;
8223 if (CHIP_REV_IS_SLOW(bp))
8224 count *= 100;
8225
8226 /* relinquish nvram interface */
8227 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8228 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8229
8230 for (i = 0; i < count*10; i++) {
8231 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8232 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8233 break;
8234
8235 udelay(5);
8236 }
8237
8238 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8239 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8240 return -EBUSY;
8241 }
8242
8243 return 0;
8244}
8245
8246static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8247{
8248 u32 val;
8249
8250 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8251
8252 /* enable both bits, even on read */
8253 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8254 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8255 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8256}
8257
8258static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8259{
8260 u32 val;
8261
8262 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8263
8264 /* disable both bits, even after read */
8265 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8266 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8267 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8268}
8269
8270static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8271 u32 cmd_flags)
8272{
f1410647 8273 int count, i, rc;
a2fbb9ea
ET
8274 u32 val;
8275
8276 /* build the command word */
8277 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8278
8279 /* need to clear DONE bit separately */
8280 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8281
8282 /* address of the NVRAM to read from */
8283 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8284 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8285
8286 /* issue a read command */
8287 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8288
8289 /* adjust timeout for emulation/FPGA */
8290 count = NVRAM_TIMEOUT_COUNT;
8291 if (CHIP_REV_IS_SLOW(bp))
8292 count *= 100;
8293
8294 /* wait for completion */
8295 *ret_val = 0;
8296 rc = -EBUSY;
8297 for (i = 0; i < count; i++) {
8298 udelay(5);
8299 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8300
8301 if (val & MCPR_NVM_COMMAND_DONE) {
8302 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8303 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8304 /* we read nvram data in cpu order
8305 * but ethtool sees it as an array of bytes
8306 * converting to big-endian will do the work */
8307 val = cpu_to_be32(val);
8308 *ret_val = val;
8309 rc = 0;
8310 break;
8311 }
8312 }
8313
8314 return rc;
8315}
8316
8317static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8318 int buf_size)
8319{
8320 int rc;
8321 u32 cmd_flags;
8322 u32 val;
8323
8324 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8325 DP(NETIF_MSG_NVM,
c14423fe 8326 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8327 offset, buf_size);
8328 return -EINVAL;
8329 }
8330
8331 if (offset + buf_size > bp->flash_size) {
c14423fe 8332 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
8333 " buf_size (0x%x) > flash_size (0x%x)\n",
8334 offset, buf_size, bp->flash_size);
8335 return -EINVAL;
8336 }
8337
8338 /* request access to nvram interface */
8339 rc = bnx2x_acquire_nvram_lock(bp);
8340 if (rc)
8341 return rc;
8342
8343 /* enable access to nvram interface */
8344 bnx2x_enable_nvram_access(bp);
8345
8346 /* read the first word(s) */
8347 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8348 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8349 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8350 memcpy(ret_buf, &val, 4);
8351
8352 /* advance to the next dword */
8353 offset += sizeof(u32);
8354 ret_buf += sizeof(u32);
8355 buf_size -= sizeof(u32);
8356 cmd_flags = 0;
8357 }
8358
8359 if (rc == 0) {
8360 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8361 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8362 memcpy(ret_buf, &val, 4);
8363 }
8364
8365 /* disable access to nvram interface */
8366 bnx2x_disable_nvram_access(bp);
8367 bnx2x_release_nvram_lock(bp);
8368
8369 return rc;
8370}
8371
8372static int bnx2x_get_eeprom(struct net_device *dev,
8373 struct ethtool_eeprom *eeprom, u8 *eebuf)
8374{
8375 struct bnx2x *bp = netdev_priv(dev);
8376 int rc;
8377
8378 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8379 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8380 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8381 eeprom->len, eeprom->len);
8382
8383 /* parameters already validated in ethtool_get_eeprom */
8384
8385 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8386
8387 return rc;
8388}
8389
8390static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8391 u32 cmd_flags)
8392{
f1410647 8393 int count, i, rc;
a2fbb9ea
ET
8394
8395 /* build the command word */
8396 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8397
8398 /* need to clear DONE bit separately */
8399 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8400
8401 /* write the data */
8402 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8403
8404 /* address of the NVRAM to write to */
8405 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8406 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8407
8408 /* issue the write command */
8409 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8410
8411 /* adjust timeout for emulation/FPGA */
8412 count = NVRAM_TIMEOUT_COUNT;
8413 if (CHIP_REV_IS_SLOW(bp))
8414 count *= 100;
8415
8416 /* wait for completion */
8417 rc = -EBUSY;
8418 for (i = 0; i < count; i++) {
8419 udelay(5);
8420 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8421 if (val & MCPR_NVM_COMMAND_DONE) {
8422 rc = 0;
8423 break;
8424 }
8425 }
8426
8427 return rc;
8428}
8429
f1410647 8430#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8431
8432static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8433 int buf_size)
8434{
8435 int rc;
8436 u32 cmd_flags;
8437 u32 align_offset;
8438 u32 val;
8439
8440 if (offset + buf_size > bp->flash_size) {
c14423fe 8441 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
8442 " buf_size (0x%x) > flash_size (0x%x)\n",
8443 offset, buf_size, bp->flash_size);
8444 return -EINVAL;
8445 }
8446
8447 /* request access to nvram interface */
8448 rc = bnx2x_acquire_nvram_lock(bp);
8449 if (rc)
8450 return rc;
8451
8452 /* enable access to nvram interface */
8453 bnx2x_enable_nvram_access(bp);
8454
8455 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8456 align_offset = (offset & ~0x03);
8457 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8458
8459 if (rc == 0) {
8460 val &= ~(0xff << BYTE_OFFSET(offset));
8461 val |= (*data_buf << BYTE_OFFSET(offset));
8462
8463 /* nvram data is returned as an array of bytes
8464 * convert it back to cpu order */
8465 val = be32_to_cpu(val);
8466
8467 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8468
8469 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8470 cmd_flags);
8471 }
8472
8473 /* disable access to nvram interface */
8474 bnx2x_disable_nvram_access(bp);
8475 bnx2x_release_nvram_lock(bp);
8476
8477 return rc;
8478}
8479
8480static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8481 int buf_size)
8482{
8483 int rc;
8484 u32 cmd_flags;
8485 u32 val;
8486 u32 written_so_far;
8487
8488 if (buf_size == 1) { /* ethtool */
8489 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8490 }
8491
8492 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8493 DP(NETIF_MSG_NVM,
c14423fe 8494 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8495 offset, buf_size);
8496 return -EINVAL;
8497 }
8498
8499 if (offset + buf_size > bp->flash_size) {
c14423fe 8500 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
8501 " buf_size (0x%x) > flash_size (0x%x)\n",
8502 offset, buf_size, bp->flash_size);
8503 return -EINVAL;
8504 }
8505
8506 /* request access to nvram interface */
8507 rc = bnx2x_acquire_nvram_lock(bp);
8508 if (rc)
8509 return rc;
8510
8511 /* enable access to nvram interface */
8512 bnx2x_enable_nvram_access(bp);
8513
8514 written_so_far = 0;
8515 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8516 while ((written_so_far < buf_size) && (rc == 0)) {
8517 if (written_so_far == (buf_size - sizeof(u32)))
8518 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8519 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8520 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8521 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8522 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8523
8524 memcpy(&val, data_buf, 4);
8525 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8526
8527 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8528
8529 /* advance to the next dword */
8530 offset += sizeof(u32);
8531 data_buf += sizeof(u32);
8532 written_so_far += sizeof(u32);
8533 cmd_flags = 0;
8534 }
8535
8536 /* disable access to nvram interface */
8537 bnx2x_disable_nvram_access(bp);
8538 bnx2x_release_nvram_lock(bp);
8539
8540 return rc;
8541}
8542
8543static int bnx2x_set_eeprom(struct net_device *dev,
8544 struct ethtool_eeprom *eeprom, u8 *eebuf)
8545{
8546 struct bnx2x *bp = netdev_priv(dev);
8547 int rc;
8548
8549 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8550 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8551 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8552 eeprom->len, eeprom->len);
8553
8554 /* parameters already validated in ethtool_set_eeprom */
8555
8556 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8557
8558 return rc;
8559}
8560
8561static int bnx2x_get_coalesce(struct net_device *dev,
8562 struct ethtool_coalesce *coal)
8563{
8564 struct bnx2x *bp = netdev_priv(dev);
8565
8566 memset(coal, 0, sizeof(struct ethtool_coalesce));
8567
8568 coal->rx_coalesce_usecs = bp->rx_ticks;
8569 coal->tx_coalesce_usecs = bp->tx_ticks;
8570 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8571
8572 return 0;
8573}
8574
8575static int bnx2x_set_coalesce(struct net_device *dev,
8576 struct ethtool_coalesce *coal)
8577{
8578 struct bnx2x *bp = netdev_priv(dev);
8579
8580 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8581 if (bp->rx_ticks > 3000)
8582 bp->rx_ticks = 3000;
8583
8584 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8585 if (bp->tx_ticks > 0x3000)
8586 bp->tx_ticks = 0x3000;
8587
8588 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8589 if (bp->stats_ticks > 0xffff00)
8590 bp->stats_ticks = 0xffff00;
8591 bp->stats_ticks &= 0xffff00;
8592
8593 if (netif_running(bp->dev))
8594 bnx2x_update_coalesce(bp);
8595
8596 return 0;
8597}
8598
8599static void bnx2x_get_ringparam(struct net_device *dev,
8600 struct ethtool_ringparam *ering)
8601{
8602 struct bnx2x *bp = netdev_priv(dev);
8603
8604 ering->rx_max_pending = MAX_RX_AVAIL;
8605 ering->rx_mini_max_pending = 0;
8606 ering->rx_jumbo_max_pending = 0;
8607
8608 ering->rx_pending = bp->rx_ring_size;
8609 ering->rx_mini_pending = 0;
8610 ering->rx_jumbo_pending = 0;
8611
8612 ering->tx_max_pending = MAX_TX_AVAIL;
8613 ering->tx_pending = bp->tx_ring_size;
8614}
8615
8616static int bnx2x_set_ringparam(struct net_device *dev,
8617 struct ethtool_ringparam *ering)
8618{
8619 struct bnx2x *bp = netdev_priv(dev);
8620
8621 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8622 (ering->tx_pending > MAX_TX_AVAIL) ||
8623 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8624 return -EINVAL;
8625
8626 bp->rx_ring_size = ering->rx_pending;
8627 bp->tx_ring_size = ering->tx_pending;
8628
8629 if (netif_running(bp->dev)) {
8630 bnx2x_nic_unload(bp, 0);
8631 bnx2x_nic_load(bp, 0);
8632 }
8633
8634 return 0;
8635}
8636
8637static void bnx2x_get_pauseparam(struct net_device *dev,
8638 struct ethtool_pauseparam *epause)
8639{
8640 struct bnx2x *bp = netdev_priv(dev);
8641
8642 epause->autoneg =
8643 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8644 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8645 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8646
8647 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8648 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8649 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8650}
8651
8652static int bnx2x_set_pauseparam(struct net_device *dev,
8653 struct ethtool_pauseparam *epause)
8654{
8655 struct bnx2x *bp = netdev_priv(dev);
8656
8657 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8658 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8659 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8660
a2fbb9ea 8661 if (epause->autoneg) {
f1410647
ET
8662 if (!(bp->supported & SUPPORTED_Autoneg)) {
8663 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
8664 return -EINVAL;
a2fbb9ea
ET
8665 }
8666
f1410647
ET
8667 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8668 } else
a2fbb9ea
ET
8669 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8670
f1410647 8671 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8672
f1410647
ET
8673 if (epause->rx_pause)
8674 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8675 if (epause->tx_pause)
8676 bp->req_flow_ctrl |= FLOW_CTRL_TX;
a2fbb9ea 8677
f1410647
ET
8678 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8679 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8680 bp->req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8681
f1410647
ET
8682 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8683 bp->req_autoneg, bp->req_flow_ctrl);
a2fbb9ea
ET
8684
8685 bnx2x_stop_stats(bp);
8686 bnx2x_link_initialize(bp);
8687
8688 return 0;
8689}
8690
8691static u32 bnx2x_get_rx_csum(struct net_device *dev)
8692{
8693 struct bnx2x *bp = netdev_priv(dev);
8694
8695 return bp->rx_csum;
8696}
8697
8698static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8699{
8700 struct bnx2x *bp = netdev_priv(dev);
8701
8702 bp->rx_csum = data;
8703 return 0;
8704}
8705
8706static int bnx2x_set_tso(struct net_device *dev, u32 data)
8707{
8708 if (data)
8709 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8710 else
8711 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8712 return 0;
8713}
8714
8715static struct {
8716 char string[ETH_GSTRING_LEN];
8717} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8718 { "MC Errors (online)" }
8719};
8720
8721static int bnx2x_self_test_count(struct net_device *dev)
8722{
8723 return BNX2X_NUM_TESTS;
8724}
8725
8726static void bnx2x_self_test(struct net_device *dev,
8727 struct ethtool_test *etest, u64 *buf)
8728{
8729 struct bnx2x *bp = netdev_priv(dev);
8730 int stats_state;
8731
8732 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8733
8734 if (bp->state != BNX2X_STATE_OPEN) {
8735 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8736 return;
8737 }
8738
8739 stats_state = bp->stats_state;
8740 bnx2x_stop_stats(bp);
8741
8742 if (bnx2x_mc_assert(bp) != 0) {
8743 buf[0] = 1;
8744 etest->flags |= ETH_TEST_FL_FAILED;
8745 }
8746
8747#ifdef BNX2X_EXTRA_DEBUG
8748 bnx2x_panic_dump(bp);
8749#endif
8750 bp->stats_state = stats_state;
8751}
8752
8753static struct {
8754 char string[ETH_GSTRING_LEN];
8755} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
0e39e645
ET
8756 { "rx_bytes"},
8757 { "rx_error_bytes"},
8758 { "tx_bytes"},
8759 { "tx_error_bytes"},
8760 { "rx_ucast_packets"},
8761 { "rx_mcast_packets"},
8762 { "rx_bcast_packets"},
8763 { "tx_ucast_packets"},
8764 { "tx_mcast_packets"},
8765 { "tx_bcast_packets"},
8766 { "tx_mac_errors"}, /* 10 */
8767 { "tx_carrier_errors"},
8768 { "rx_crc_errors"},
8769 { "rx_align_errors"},
8770 { "tx_single_collisions"},
8771 { "tx_multi_collisions"},
8772 { "tx_deferred"},
8773 { "tx_excess_collisions"},
8774 { "tx_late_collisions"},
8775 { "tx_total_collisions"},
8776 { "rx_fragments"}, /* 20 */
8777 { "rx_jabbers"},
8778 { "rx_undersize_packets"},
8779 { "rx_oversize_packets"},
8780 { "rx_xon_frames"},
8781 { "rx_xoff_frames"},
8782 { "tx_xon_frames"},
8783 { "tx_xoff_frames"},
8784 { "rx_mac_ctrl_frames"},
8785 { "rx_filtered_packets"},
8786 { "rx_discards"}, /* 30 */
8787 { "brb_discard"},
8788 { "brb_truncate"},
8789 { "xxoverflow"}
a2fbb9ea
ET
8790};
8791
8792#define STATS_OFFSET32(offset_name) \
8793 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8794
8795static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
0e39e645
ET
8796 STATS_OFFSET32(total_bytes_received_hi),
8797 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
8798 STATS_OFFSET32(total_bytes_transmitted_hi),
8799 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
8800 STATS_OFFSET32(total_unicast_packets_received_hi),
8801 STATS_OFFSET32(total_multicast_packets_received_hi),
8802 STATS_OFFSET32(total_broadcast_packets_received_hi),
8803 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8804 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8805 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8806 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8807 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
8808 STATS_OFFSET32(crc_receive_errors),
8809 STATS_OFFSET32(alignment_errors),
8810 STATS_OFFSET32(single_collision_transmit_frames),
8811 STATS_OFFSET32(multiple_collision_transmit_frames),
8812 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
8813 STATS_OFFSET32(excessive_collision_frames),
8814 STATS_OFFSET32(late_collision_frames),
8815 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
8816 STATS_OFFSET32(runt_packets_received), /* 20 */
8817 STATS_OFFSET32(jabber_packets_received),
8818 STATS_OFFSET32(error_runt_packets_received),
8819 STATS_OFFSET32(error_jabber_packets_received),
8820 STATS_OFFSET32(pause_xon_frames_received),
8821 STATS_OFFSET32(pause_xoff_frames_received),
8822 STATS_OFFSET32(pause_xon_frames_transmitted),
8823 STATS_OFFSET32(pause_xoff_frames_transmitted),
8824 STATS_OFFSET32(control_frames_received),
8825 STATS_OFFSET32(mac_filter_discard),
8826 STATS_OFFSET32(no_buff_discard), /* 30 */
8827 STATS_OFFSET32(brb_discard),
8828 STATS_OFFSET32(brb_truncate_discard),
8829 STATS_OFFSET32(xxoverflow_discard)
a2fbb9ea
ET
8830};
8831
8832static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8833 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8834 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8835 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
0e39e645 8836 4, 4, 4, 4
a2fbb9ea
ET
8837};
8838
8839static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8840{
8841 switch (stringset) {
8842 case ETH_SS_STATS:
8843 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8844 break;
8845
8846 case ETH_SS_TEST:
8847 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8848 break;
8849 }
8850}
8851
8852static int bnx2x_get_stats_count(struct net_device *dev)
8853{
8854 return BNX2X_NUM_STATS;
8855}
8856
8857static void bnx2x_get_ethtool_stats(struct net_device *dev,
8858 struct ethtool_stats *stats, u64 *buf)
8859{
8860 struct bnx2x *bp = netdev_priv(dev);
8861 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8862 int i;
8863
8864 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8865 if (bnx2x_stats_len_arr[i] == 0) {
8866 /* skip this counter */
8867 buf[i] = 0;
8868 continue;
8869 }
8870 if (!hw_stats) {
8871 buf[i] = 0;
8872 continue;
8873 }
8874 if (bnx2x_stats_len_arr[i] == 4) {
8875 /* 4-byte counter */
8876 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8877 continue;
8878 }
8879 /* 8-byte counter */
8880 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8881 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
8882 }
8883}
8884
8885static int bnx2x_phys_id(struct net_device *dev, u32 data)
8886{
8887 struct bnx2x *bp = netdev_priv(dev);
8888 int i;
8889
8890 if (data == 0)
8891 data = 2;
8892
8893 for (i = 0; i < (data * 2); i++) {
8894 if ((i % 2) == 0) {
8895 bnx2x_leds_set(bp, SPEED_1000);
8896 } else {
8897 bnx2x_leds_unset(bp);
8898 }
8899 msleep_interruptible(500);
8900 if (signal_pending(current))
8901 break;
8902 }
8903
8904 if (bp->link_up)
8905 bnx2x_leds_set(bp, bp->line_speed);
8906
8907 return 0;
8908}
8909
8910static struct ethtool_ops bnx2x_ethtool_ops = {
8911 .get_settings = bnx2x_get_settings,
8912 .set_settings = bnx2x_set_settings,
8913 .get_drvinfo = bnx2x_get_drvinfo,
8914 .get_wol = bnx2x_get_wol,
8915 .set_wol = bnx2x_set_wol,
8916 .get_msglevel = bnx2x_get_msglevel,
8917 .set_msglevel = bnx2x_set_msglevel,
8918 .nway_reset = bnx2x_nway_reset,
8919 .get_link = ethtool_op_get_link,
8920 .get_eeprom_len = bnx2x_get_eeprom_len,
8921 .get_eeprom = bnx2x_get_eeprom,
8922 .set_eeprom = bnx2x_set_eeprom,
8923 .get_coalesce = bnx2x_get_coalesce,
8924 .set_coalesce = bnx2x_set_coalesce,
8925 .get_ringparam = bnx2x_get_ringparam,
8926 .set_ringparam = bnx2x_set_ringparam,
8927 .get_pauseparam = bnx2x_get_pauseparam,
8928 .set_pauseparam = bnx2x_set_pauseparam,
8929 .get_rx_csum = bnx2x_get_rx_csum,
8930 .set_rx_csum = bnx2x_set_rx_csum,
8931 .get_tx_csum = ethtool_op_get_tx_csum,
8932 .set_tx_csum = ethtool_op_set_tx_csum,
8933 .get_sg = ethtool_op_get_sg,
8934 .set_sg = ethtool_op_set_sg,
8935 .get_tso = ethtool_op_get_tso,
8936 .set_tso = bnx2x_set_tso,
8937 .self_test_count = bnx2x_self_test_count,
8938 .self_test = bnx2x_self_test,
8939 .get_strings = bnx2x_get_strings,
8940 .phys_id = bnx2x_phys_id,
8941 .get_stats_count = bnx2x_get_stats_count,
8942 .get_ethtool_stats = bnx2x_get_ethtool_stats
8943};
8944
8945/* end of ethtool_ops */
8946
8947/****************************************************************************
8948* General service functions
8949****************************************************************************/
8950
8951static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8952{
8953 u16 pmcsr;
8954
8955 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
8956
8957 switch (state) {
8958 case PCI_D0:
8959 pci_write_config_word(bp->pdev,
8960 bp->pm_cap + PCI_PM_CTRL,
8961 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
8962 PCI_PM_CTRL_PME_STATUS));
8963
8964 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
8965 /* delay required during transition out of D3hot */
8966 msleep(20);
8967 break;
8968
8969 case PCI_D3hot:
8970 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
8971 pmcsr |= 3;
8972
8973 if (bp->wol)
8974 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
8975
8976 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
8977 pmcsr);
8978
8979 /* No more memory access after this point until
8980 * device is brought back to D0.
8981 */
8982 break;
8983
8984 default:
8985 return -EINVAL;
8986 }
8987 return 0;
8988}
8989
8990/*
8991 * net_device service functions
8992 */
8993
49d66772 8994/* called with netif_tx_lock from set_multicast */
a2fbb9ea
ET
8995static void bnx2x_set_rx_mode(struct net_device *dev)
8996{
8997 struct bnx2x *bp = netdev_priv(dev);
8998 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8999
9000 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
9001
9002 if (dev->flags & IFF_PROMISC)
9003 rx_mode = BNX2X_RX_MODE_PROMISC;
9004
9005 else if ((dev->flags & IFF_ALLMULTI) ||
9006 (dev->mc_count > BNX2X_MAX_MULTICAST))
9007 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9008
9009 else { /* some multicasts */
9010 int i, old, offset;
9011 struct dev_mc_list *mclist;
9012 struct mac_configuration_cmd *config =
9013 bnx2x_sp(bp, mcast_config);
9014
9015 for (i = 0, mclist = dev->mc_list;
9016 mclist && (i < dev->mc_count);
9017 i++, mclist = mclist->next) {
9018
9019 config->config_table[i].cam_entry.msb_mac_addr =
9020 swab16(*(u16 *)&mclist->dmi_addr[0]);
9021 config->config_table[i].cam_entry.middle_mac_addr =
9022 swab16(*(u16 *)&mclist->dmi_addr[2]);
9023 config->config_table[i].cam_entry.lsb_mac_addr =
9024 swab16(*(u16 *)&mclist->dmi_addr[4]);
9025 config->config_table[i].cam_entry.flags =
9026 cpu_to_le16(bp->port);
9027 config->config_table[i].target_table_entry.flags = 0;
9028 config->config_table[i].target_table_entry.
9029 client_id = 0;
9030 config->config_table[i].target_table_entry.
9031 vlan_id = 0;
9032
9033 DP(NETIF_MSG_IFUP,
9034 "setting MCAST[%d] (%04x:%04x:%04x)\n",
9035 i, config->config_table[i].cam_entry.msb_mac_addr,
9036 config->config_table[i].cam_entry.middle_mac_addr,
9037 config->config_table[i].cam_entry.lsb_mac_addr);
9038 }
9039 old = config->hdr.length_6b;
9040 if (old > i) {
9041 for (; i < old; i++) {
9042 if (CAM_IS_INVALID(config->config_table[i])) {
9043 i--; /* already invalidated */
9044 break;
9045 }
9046 /* invalidate */
9047 CAM_INVALIDATE(config->config_table[i]);
9048 }
9049 }
9050
9051 if (CHIP_REV_IS_SLOW(bp))
9052 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
9053 else
9054 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
9055
9056 config->hdr.length_6b = i;
9057 config->hdr.offset = offset;
9058 config->hdr.reserved0 = 0;
9059 config->hdr.reserved1 = 0;
9060
9061 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9062 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9063 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
9064 }
9065
9066 bp->rx_mode = rx_mode;
9067 bnx2x_set_storm_rx_mode(bp);
9068}
9069
9070static int bnx2x_poll(struct napi_struct *napi, int budget)
9071{
9072 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9073 napi);
9074 struct bnx2x *bp = fp->bp;
9075 int work_done = 0;
9076
9077#ifdef BNX2X_STOP_ON_ERROR
9078 if (unlikely(bp->panic))
9079 goto out_panic;
9080#endif
9081
9082 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9083 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9084 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9085
9086 bnx2x_update_fpsb_idx(fp);
9087
9088 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
9089 bnx2x_tx_int(fp, budget);
9090
9091
9092 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9093 work_done = bnx2x_rx_int(fp, budget);
9094
9095
9096 rmb(); /* bnx2x_has_work() reads the status block */
9097
9098 /* must not complete if we consumed full budget */
9099 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9100
9101#ifdef BNX2X_STOP_ON_ERROR
9102out_panic:
9103#endif
9104 netif_rx_complete(bp->dev, napi);
9105
9106 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
9107 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9108 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
9109 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9110 }
9111
9112 return work_done;
9113}
9114
9115/* Called with netif_tx_lock.
9116 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9117 * netif_wake_queue().
9118 */
9119static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9120{
9121 struct bnx2x *bp = netdev_priv(dev);
9122 struct bnx2x_fastpath *fp;
9123 struct sw_tx_bd *tx_buf;
9124 struct eth_tx_bd *tx_bd;
9125 struct eth_tx_parse_bd *pbd = NULL;
9126 u16 pkt_prod, bd_prod;
9127 int nbd, fp_index = 0;
9128 dma_addr_t mapping;
9129
9130#ifdef BNX2X_STOP_ON_ERROR
9131 if (unlikely(bp->panic))
9132 return NETDEV_TX_BUSY;
9133#endif
9134
9135 fp_index = smp_processor_id() % (bp->num_queues);
9136
9137 fp = &bp->fp[fp_index];
9138 if (unlikely(bnx2x_tx_avail(bp->fp) <
9139 (skb_shinfo(skb)->nr_frags + 3))) {
9140 bp->slowpath->eth_stats.driver_xoff++,
9141 netif_stop_queue(dev);
9142 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9143 return NETDEV_TX_BUSY;
9144 }
9145
9146 /*
9147 This is a bit ugly. First we use one BD which we mark as start,
9148 then for TSO or xsum we have a parsing info BD,
9149 and only then we have the rest of the TSO bds.
9150 (don't forget to mark the last one as last,
9151 and to unmap only AFTER you write to the BD ...)
9152 I would like to thank DovH for this mess.
9153 */
9154
9155 pkt_prod = fp->tx_pkt_prod++;
9156 bd_prod = fp->tx_bd_prod;
9157 bd_prod = TX_BD(bd_prod);
9158
9159 /* get a tx_buff and first bd */
9160 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9161 tx_bd = &fp->tx_desc_ring[bd_prod];
9162
9163 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9164 tx_bd->general_data = (UNICAST_ADDRESS <<
9165 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9166 tx_bd->general_data |= 1; /* header nbd */
9167
c14423fe 9168 /* remember the first bd of the packet */
a2fbb9ea
ET
9169 tx_buf->first_bd = bd_prod;
9170
9171 DP(NETIF_MSG_TX_QUEUED,
9172 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9173 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9174
9175 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9176 struct iphdr *iph = ip_hdr(skb);
9177 u8 len;
9178
9179 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
9180
9181 /* turn on parsing and get a bd */
9182 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9183 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9184 len = ((u8 *)iph - (u8 *)skb->data) / 2;
9185
9186 /* for now NS flag is not used in Linux */
9187 pbd->global_data = (len |
9188 ((skb->protocol == ETH_P_8021Q) <<
9189 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9190 pbd->ip_hlen = ip_hdrlen(skb) / 2;
9191 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
9192 if (iph->protocol == IPPROTO_TCP) {
9193 struct tcphdr *th = tcp_hdr(skb);
9194
9195 tx_bd->bd_flags.as_bitfield |=
9196 ETH_TX_BD_FLAGS_TCP_CSUM;
9197 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
9198 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
9199 pbd->tcp_pseudo_csum = swab16(th->check);
9200
9201 } else if (iph->protocol == IPPROTO_UDP) {
9202 struct udphdr *uh = udp_hdr(skb);
9203
9204 tx_bd->bd_flags.as_bitfield |=
9205 ETH_TX_BD_FLAGS_TCP_CSUM;
9206 pbd->total_hlen += cpu_to_le16(4);
9207 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9208 pbd->cs_offset = 5; /* 10 >> 1 */
9209 pbd->tcp_pseudo_csum = 0;
9210 /* HW bug: we need to subtract 10 bytes before the
9211 * UDP header from the csum
9212 */
9213 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
9214 csum_partial(((u8 *)(uh)-10), 10, 0)));
9215 }
9216 }
9217
9218 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9219 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9220 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9221 } else {
9222 tx_bd->vlan = cpu_to_le16(pkt_prod);
9223 }
9224
9225 mapping = pci_map_single(bp->pdev, skb->data,
9226 skb->len, PCI_DMA_TODEVICE);
9227
9228 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9229 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9230 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9231 tx_bd->nbd = cpu_to_le16(nbd);
9232 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9233
9234 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9235 " nbytes %d flags %x vlan %u\n",
9236 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
9237 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
9238
9239 if (skb_shinfo(skb)->gso_size &&
9240 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
9241 int hlen = 2 * le32_to_cpu(pbd->total_hlen);
9242
9243 DP(NETIF_MSG_TX_QUEUED,
9244 "TSO packet len %d hlen %d total len %d tso size %d\n",
9245 skb->len, hlen, skb_headlen(skb),
9246 skb_shinfo(skb)->gso_size);
9247
9248 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9249
9250 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
9251 /* we split the first bd into headers and data bds
9252 * to ease the pain of our fellow micocode engineers
9253 * we use one mapping for both bds
9254 * So far this has only been observed to happen
9255 * in Other Operating Systems(TM)
9256 */
9257
9258 /* first fix first bd */
9259 nbd++;
9260 tx_bd->nbd = cpu_to_le16(nbd);
9261 tx_bd->nbytes = cpu_to_le16(hlen);
9262
9263 /* we only print this as an error
9264 * because we don't think this will ever happen.
9265 */
9266 BNX2X_ERR("TSO split header size is %d (%x:%x)"
9267 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
9268 tx_bd->addr_lo, tx_bd->nbd);
9269
9270 /* now get a new data bd
9271 * (after the pbd) and fill it */
9272 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9273 tx_bd = &fp->tx_desc_ring[bd_prod];
9274
9275 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9276 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
9277 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
9278 tx_bd->vlan = cpu_to_le16(pkt_prod);
9279 /* this marks the bd
9280 * as one that has no individual mapping
c14423fe 9281 * the FW ignores this flag in a bd not marked start
a2fbb9ea
ET
9282 */
9283 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9284 DP(NETIF_MSG_TX_QUEUED,
9285 "TSO split data size is %d (%x:%x)\n",
9286 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
9287 }
9288
9289 if (!pbd) {
9290 /* supposed to be unreached
9291 * (and therefore not handled properly...)
9292 */
9293 BNX2X_ERR("LSO with no PBD\n");
9294 BUG();
9295 }
9296
9297 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9298 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9299 pbd->ip_id = swab16(ip_hdr(skb)->id);
9300 pbd->tcp_pseudo_csum =
9301 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9302 ip_hdr(skb)->daddr,
9303 0, IPPROTO_TCP, 0));
9304 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9305 }
9306
9307 {
9308 int i;
9309
9310 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9311 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9312
9313 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9314 tx_bd = &fp->tx_desc_ring[bd_prod];
9315
9316 mapping = pci_map_page(bp->pdev, frag->page,
9317 frag->page_offset,
9318 frag->size, PCI_DMA_TODEVICE);
9319
9320 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9321 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9322 tx_bd->nbytes = cpu_to_le16(frag->size);
9323 tx_bd->vlan = cpu_to_le16(pkt_prod);
9324 tx_bd->bd_flags.as_bitfield = 0;
9325 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
9326 " addr (%x:%x) nbytes %d flags %x\n",
9327 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9328 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
9329 } /* for */
9330 }
9331
9332 /* now at last mark the bd as the last bd */
9333 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9334
9335 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9336 tx_bd, tx_bd->bd_flags.as_bitfield);
9337
9338 tx_buf->skb = skb;
9339
9340 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9341
9342 /* now send a tx doorbell, counting the next bd
9343 * if the packet contains or ends with it
9344 */
9345 if (TX_BD_POFF(bd_prod) < nbd)
9346 nbd++;
9347
9348 if (pbd)
9349 DP(NETIF_MSG_TX_QUEUED,
9350 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9351 " tcp_flags %x xsum %x seq %u hlen %u\n",
9352 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9353 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9354 pbd->tcp_send_seq, pbd->total_hlen);
9355
9356 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
9357
9358 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
9359 mb(); /* FW restriction: must not reorder writing nbd and packets */
9360 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
9361 DOORBELL(bp, fp_index, 0);
9362
9363 mmiowb();
9364
9365 fp->tx_bd_prod = bd_prod;
9366 dev->trans_start = jiffies;
9367
9368 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9369 netif_stop_queue(dev);
9370 bp->slowpath->eth_stats.driver_xoff++;
9371 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9372 netif_wake_queue(dev);
9373 }
9374 fp->tx_pkt++;
9375
9376 return NETDEV_TX_OK;
9377}
9378
a2fbb9ea
ET
9379/* Called with rtnl_lock */
9380static int bnx2x_open(struct net_device *dev)
9381{
9382 struct bnx2x *bp = netdev_priv(dev);
9383
9384 bnx2x_set_power_state(bp, PCI_D0);
9385
9386 return bnx2x_nic_load(bp, 1);
9387}
9388
9389/* Called with rtnl_lock */
9390static int bnx2x_close(struct net_device *dev)
9391{
9392 int rc;
9393 struct bnx2x *bp = netdev_priv(dev);
9394
9395 /* Unload the driver, release IRQs */
9396 rc = bnx2x_nic_unload(bp, 1);
9397 if (rc) {
9398 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
9399 return rc;
9400 }
9401 bnx2x_set_power_state(bp, PCI_D3hot);
9402
9403 return 0;
9404}
9405
9406/* Called with rtnl_lock */
9407static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9408{
9409 struct sockaddr *addr = p;
9410 struct bnx2x *bp = netdev_priv(dev);
9411
9412 if (!is_valid_ether_addr(addr->sa_data))
9413 return -EINVAL;
9414
9415 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9416 if (netif_running(dev))
9417 bnx2x_set_mac_addr(bp);
9418
9419 return 0;
9420}
9421
9422/* Called with rtnl_lock */
9423static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9424{
9425 struct mii_ioctl_data *data = if_mii(ifr);
9426 struct bnx2x *bp = netdev_priv(dev);
9427 int err;
9428
9429 switch (cmd) {
9430 case SIOCGMIIPHY:
9431 data->phy_id = bp->phy_addr;
9432
c14423fe 9433 /* fallthrough */
a2fbb9ea
ET
9434 case SIOCGMIIREG: {
9435 u32 mii_regval;
9436
9437 spin_lock_bh(&bp->phy_lock);
9438 if (bp->state == BNX2X_STATE_OPEN) {
9439 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
9440 &mii_regval);
9441
9442 data->val_out = mii_regval;
9443 } else {
9444 err = -EAGAIN;
9445 }
9446 spin_unlock_bh(&bp->phy_lock);
9447 return err;
9448 }
9449
9450 case SIOCSMIIREG:
9451 if (!capable(CAP_NET_ADMIN))
9452 return -EPERM;
9453
9454 spin_lock_bh(&bp->phy_lock);
9455 if (bp->state == BNX2X_STATE_OPEN) {
9456 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
9457 data->val_in);
9458 } else {
9459 err = -EAGAIN;
9460 }
9461 spin_unlock_bh(&bp->phy_lock);
9462 return err;
9463
9464 default:
9465 /* do nothing */
9466 break;
9467 }
9468
9469 return -EOPNOTSUPP;
9470}
9471
9472/* Called with rtnl_lock */
9473static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9474{
9475 struct bnx2x *bp = netdev_priv(dev);
9476
9477 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9478 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9479 return -EINVAL;
9480
9481 /* This does not race with packet allocation
c14423fe 9482 * because the actual alloc size is
a2fbb9ea
ET
9483 * only updated as part of load
9484 */
9485 dev->mtu = new_mtu;
9486
9487 if (netif_running(dev)) {
9488 bnx2x_nic_unload(bp, 0);
9489 bnx2x_nic_load(bp, 0);
9490 }
9491 return 0;
9492}
9493
9494static void bnx2x_tx_timeout(struct net_device *dev)
9495{
9496 struct bnx2x *bp = netdev_priv(dev);
9497
9498#ifdef BNX2X_STOP_ON_ERROR
9499 if (!bp->panic)
9500 bnx2x_panic();
9501#endif
9502 /* This allows the netif to be shutdown gracefully before resetting */
9503 schedule_work(&bp->reset_task);
9504}
9505
9506#ifdef BCM_VLAN
9507/* Called with rtnl_lock */
9508static void bnx2x_vlan_rx_register(struct net_device *dev,
9509 struct vlan_group *vlgrp)
9510{
9511 struct bnx2x *bp = netdev_priv(dev);
9512
9513 bp->vlgrp = vlgrp;
9514 if (netif_running(dev))
49d66772 9515 bnx2x_set_client_config(bp);
a2fbb9ea
ET
9516}
9517#endif
9518
9519#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9520static void poll_bnx2x(struct net_device *dev)
9521{
9522 struct bnx2x *bp = netdev_priv(dev);
9523
9524 disable_irq(bp->pdev->irq);
9525 bnx2x_interrupt(bp->pdev->irq, dev);
9526 enable_irq(bp->pdev->irq);
9527}
9528#endif
9529
9530static void bnx2x_reset_task(struct work_struct *work)
9531{
9532 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
9533
9534#ifdef BNX2X_STOP_ON_ERROR
9535 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
9536 " so reset not done to allow debug dump,\n"
9537 KERN_ERR " you will need to reboot when done\n");
9538 return;
9539#endif
9540
9541 if (!netif_running(bp->dev))
9542 return;
9543
9544 bp->in_reset_task = 1;
9545
9546 bnx2x_netif_stop(bp);
9547
9548 bnx2x_nic_unload(bp, 0);
9549 bnx2x_nic_load(bp, 0);
9550
9551 bp->in_reset_task = 0;
9552}
9553
9554static int __devinit bnx2x_init_board(struct pci_dev *pdev,
9555 struct net_device *dev)
9556{
9557 struct bnx2x *bp;
9558 int rc;
9559
9560 SET_NETDEV_DEV(dev, &pdev->dev);
9561 bp = netdev_priv(dev);
9562
9563 bp->flags = 0;
9564 bp->port = PCI_FUNC(pdev->devfn);
9565
9566 rc = pci_enable_device(pdev);
9567 if (rc) {
9568 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9569 goto err_out;
9570 }
9571
9572 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9573 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9574 " aborting\n");
9575 rc = -ENODEV;
9576 goto err_out_disable;
9577 }
9578
9579 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9580 printk(KERN_ERR PFX "Cannot find second PCI device"
9581 " base address, aborting\n");
9582 rc = -ENODEV;
9583 goto err_out_disable;
9584 }
9585
9586 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9587 if (rc) {
9588 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9589 " aborting\n");
9590 goto err_out_disable;
9591 }
9592
9593 pci_set_master(pdev);
9594
9595 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9596 if (bp->pm_cap == 0) {
9597 printk(KERN_ERR PFX "Cannot find power management"
9598 " capability, aborting\n");
9599 rc = -EIO;
9600 goto err_out_release;
9601 }
9602
9603 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9604 if (bp->pcie_cap == 0) {
9605 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9606 " aborting\n");
9607 rc = -EIO;
9608 goto err_out_release;
9609 }
9610
9611 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9612 bp->flags |= USING_DAC_FLAG;
9613 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9614 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9615 " failed, aborting\n");
9616 rc = -EIO;
9617 goto err_out_release;
9618 }
9619
9620 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9621 printk(KERN_ERR PFX "System does not support DMA,"
9622 " aborting\n");
9623 rc = -EIO;
9624 goto err_out_release;
9625 }
9626
9627 bp->dev = dev;
9628 bp->pdev = pdev;
9629
9630 spin_lock_init(&bp->phy_lock);
9631
9632 bp->in_reset_task = 0;
9633
9634 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
9635 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
9636
cba0516d 9637 dev->base_addr = pci_resource_start(pdev, 0);
a2fbb9ea
ET
9638
9639 dev->irq = pdev->irq;
9640
9641 bp->regview = ioremap_nocache(dev->base_addr,
9642 pci_resource_len(pdev, 0));
9643 if (!bp->regview) {
9644 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9645 rc = -ENOMEM;
9646 goto err_out_release;
9647 }
9648
9649 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
9650 pci_resource_len(pdev, 2));
9651 if (!bp->doorbells) {
9652 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9653 rc = -ENOMEM;
9654 goto err_out_unmap;
9655 }
9656
9657 bnx2x_set_power_state(bp, PCI_D0);
9658
9659 bnx2x_get_hwinfo(bp);
9660
9661 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
c14423fe 9662 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
a2fbb9ea
ET
9663 " will only init first device\n");
9664 onefunc = 1;
9665 nomcp = 1;
9666 }
9667
9668 if (nomcp) {
9669 printk(KERN_ERR PFX "MCP disabled, will only"
9670 " init first device\n");
9671 onefunc = 1;
9672 }
9673
9674 if (onefunc && bp->port) {
9675 printk(KERN_ERR PFX "Second device disabled, exiting\n");
9676 rc = -ENODEV;
9677 goto err_out_unmap;
9678 }
9679
9680 bp->tx_ring_size = MAX_TX_AVAIL;
9681 bp->rx_ring_size = MAX_RX_AVAIL;
9682
9683 bp->rx_csum = 1;
9684
9685 bp->rx_offset = 0;
9686
9687 bp->tx_quick_cons_trip_int = 0xff;
9688 bp->tx_quick_cons_trip = 0xff;
9689 bp->tx_ticks_int = 50;
9690 bp->tx_ticks = 50;
9691
9692 bp->rx_quick_cons_trip_int = 0xff;
9693 bp->rx_quick_cons_trip = 0xff;
9694 bp->rx_ticks_int = 25;
9695 bp->rx_ticks = 25;
9696
9697 bp->stats_ticks = 1000000 & 0xffff00;
9698
9699 bp->timer_interval = HZ;
9700 bp->current_interval = (poll ? poll : HZ);
9701
9702 init_timer(&bp->timer);
9703 bp->timer.expires = jiffies + bp->current_interval;
9704 bp->timer.data = (unsigned long) bp;
9705 bp->timer.function = bnx2x_timer;
9706
9707 return 0;
9708
9709err_out_unmap:
9710 if (bp->regview) {
9711 iounmap(bp->regview);
9712 bp->regview = NULL;
9713 }
9714
9715 if (bp->doorbells) {
9716 iounmap(bp->doorbells);
9717 bp->doorbells = NULL;
9718 }
9719
9720err_out_release:
9721 pci_release_regions(pdev);
9722
9723err_out_disable:
9724 pci_disable_device(pdev);
9725 pci_set_drvdata(pdev, NULL);
9726
9727err_out:
9728 return rc;
9729}
9730
25047950
ET
9731static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9732{
9733 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9734
9735 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9736 return val;
9737}
9738
9739/* return value of 1=2.5GHz 2=5GHz */
9740static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9741{
9742 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9743
9744 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9745 return val;
9746}
9747
a2fbb9ea
ET
9748static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9749 const struct pci_device_id *ent)
9750{
9751 static int version_printed;
9752 struct net_device *dev = NULL;
9753 struct bnx2x *bp;
25047950 9754 int rc;
a2fbb9ea 9755 int port = PCI_FUNC(pdev->devfn);
25047950 9756 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
9757
9758 if (version_printed++ == 0)
9759 printk(KERN_INFO "%s", version);
9760
9761 /* dev zeroed in init_etherdev */
9762 dev = alloc_etherdev(sizeof(*bp));
9763 if (!dev)
9764 return -ENOMEM;
9765
9766 netif_carrier_off(dev);
9767
9768 bp = netdev_priv(dev);
9769 bp->msglevel = debug;
9770
9771 if (port && onefunc) {
9772 printk(KERN_ERR PFX "second function disabled. exiting\n");
25047950 9773 free_netdev(dev);
a2fbb9ea
ET
9774 return 0;
9775 }
9776
9777 rc = bnx2x_init_board(pdev, dev);
9778 if (rc < 0) {
9779 free_netdev(dev);
9780 return rc;
9781 }
9782
9783 dev->hard_start_xmit = bnx2x_start_xmit;
9784 dev->watchdog_timeo = TX_TIMEOUT;
9785
a2fbb9ea
ET
9786 dev->ethtool_ops = &bnx2x_ethtool_ops;
9787 dev->open = bnx2x_open;
9788 dev->stop = bnx2x_close;
9789 dev->set_multicast_list = bnx2x_set_rx_mode;
9790 dev->set_mac_address = bnx2x_change_mac_addr;
9791 dev->do_ioctl = bnx2x_ioctl;
9792 dev->change_mtu = bnx2x_change_mtu;
9793 dev->tx_timeout = bnx2x_tx_timeout;
9794#ifdef BCM_VLAN
9795 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9796#endif
9797#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9798 dev->poll_controller = poll_bnx2x;
9799#endif
9800 dev->features |= NETIF_F_SG;
9801 if (bp->flags & USING_DAC_FLAG)
9802 dev->features |= NETIF_F_HIGHDMA;
9803 dev->features |= NETIF_F_IP_CSUM;
9804#ifdef BCM_VLAN
9805 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9806#endif
9807 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
9808
9809 rc = register_netdev(dev);
9810 if (rc) {
c14423fe 9811 dev_err(&pdev->dev, "Cannot register net device\n");
a2fbb9ea
ET
9812 if (bp->regview)
9813 iounmap(bp->regview);
9814 if (bp->doorbells)
9815 iounmap(bp->doorbells);
9816 pci_release_regions(pdev);
9817 pci_disable_device(pdev);
9818 pci_set_drvdata(pdev, NULL);
9819 free_netdev(dev);
9820 return rc;
9821 }
9822
9823 pci_set_drvdata(pdev, dev);
9824
9825 bp->name = board_info[ent->driver_data].name;
25047950
ET
9826 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
9827 " IRQ %d, ", dev->name, bp->name,
a2fbb9ea
ET
9828 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
9829 ((CHIP_ID(bp) & 0x0ff0) >> 4),
25047950
ET
9830 bnx2x_get_pcie_width(bp),
9831 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
9832 dev->base_addr, bp->pdev->irq);
9833 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea
ET
9834 return 0;
9835}
9836
9837static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9838{
9839 struct net_device *dev = pci_get_drvdata(pdev);
9840 struct bnx2x *bp = netdev_priv(dev);
9841
9842 flush_scheduled_work();
9843 /*tasklet_kill(&bp->sp_task);*/
9844 unregister_netdev(dev);
9845
9846 if (bp->regview)
9847 iounmap(bp->regview);
9848
9849 if (bp->doorbells)
9850 iounmap(bp->doorbells);
9851
9852 free_netdev(dev);
9853 pci_release_regions(pdev);
9854 pci_disable_device(pdev);
9855 pci_set_drvdata(pdev, NULL);
9856}
9857
9858static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9859{
9860 struct net_device *dev = pci_get_drvdata(pdev);
9861 struct bnx2x *bp = netdev_priv(dev);
9862 int rc;
9863
9864 if (!netif_running(dev))
9865 return 0;
9866
9867 rc = bnx2x_nic_unload(bp, 0);
9868 if (!rc)
9869 return rc;
9870
9871 netif_device_detach(dev);
9872 pci_save_state(pdev);
9873
9874 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9875 return 0;
9876}
9877
9878static int bnx2x_resume(struct pci_dev *pdev)
9879{
9880 struct net_device *dev = pci_get_drvdata(pdev);
9881 struct bnx2x *bp = netdev_priv(dev);
9882 int rc;
9883
9884 if (!netif_running(dev))
9885 return 0;
9886
9887 pci_restore_state(pdev);
9888
9889 bnx2x_set_power_state(bp, PCI_D0);
9890 netif_device_attach(dev);
9891
9892 rc = bnx2x_nic_load(bp, 0);
9893 if (rc)
9894 return rc;
9895
9896 return 0;
9897}
9898
9899static struct pci_driver bnx2x_pci_driver = {
9900 .name = DRV_MODULE_NAME,
9901 .id_table = bnx2x_pci_tbl,
9902 .probe = bnx2x_init_one,
9903 .remove = __devexit_p(bnx2x_remove_one),
9904 .suspend = bnx2x_suspend,
9905 .resume = bnx2x_resume,
9906};
9907
9908static int __init bnx2x_init(void)
9909{
9910 return pci_register_driver(&bnx2x_pci_driver);
9911}
9912
9913static void __exit bnx2x_cleanup(void)
9914{
9915 pci_unregister_driver(&bnx2x_pci_driver);
9916}
9917
9918module_init(bnx2x_init);
9919module_exit(bnx2x_cleanup);
9920