/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
        #define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION "0.40.15"
#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER 0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #404 $");

static int use_inta;
static int poll;
static int onefunc;
static int nomcp;
static int debug;
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

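/* Indirect (GRC window) register access: the PCICFG_GRC_ADDRESS /
 * PCICFG_GRC_DATA pair in PCI config space acts as a window into the
 * chip's register space.  The window is parked back at
 * PCICFG_VENDOR_ID_OFFSET after each access, presumably so that a
 * stray config cycle cannot hit an arbitrary register.
 */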
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

#ifdef BNX2X_IND_RD
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
#endif

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

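/* The DMAE block exposes 16 command channels: each has a command slot
 * in DMAE_REG_CMD_MEM and a matching GO register in dmae_reg_go_c[].
 * A transfer is kicked off by copying a dmae_command into the slot
 * and writing 1 to the GO register; this driver uses channel port*8.
 */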
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

/*              DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

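/* Copy len32 dwords from host memory (dma_addr) to GRC address dst_addr
 * using the DMAE.  Completion is detected by polling the write-back
 * word in the slowpath area for BNX2X_WB_COMP_VAL, 5us at a time
 * (the iteration budget is scaled x100 on emulation/FPGA where
 * everything runs much slower).
 */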
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
/*              DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}

#ifdef BNX2X_DMAE_RD
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/
}
#endif

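/* Scan the assert lists of the four storm processors.  The letters in
 * storm[] ("XTCU") correspond to the intmem_base[] entries (XSTORM,
 * TSTORM, CSTORM, USTORM); scanning stops at the first entry whose
 * opcode still reads COMMON_ASM_INVALID_ASSERT_OPCODE.  Returns the
 * number of asserts found.
 */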
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j, rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                if (last_idx)
                        BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                                  storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                break;
                        }
                }
        }
        return rc;
}

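/* Dump the firmware's log from the MCP scratchpad.  The mark word at
 * offset 0xf104 appears to hold an MCP-view address (hence the
 * 0x08000000 adjustment) into a cyclic text buffer: the dump prints
 * from mark to the end of the buffer and then wraps around from
 * 0xF108 back up to mark.
 */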
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
                          " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
                          " *rx_cons_sb(%x) rx_comp_prod(%x)"
                          " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
                          " bd data(%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
                  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
                  " spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");

        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

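/* HC (host coalescing) interrupt configuration: in MSI-X mode the
 * single-ISR bit is cleared and the MSI/MSI-X enable set, in INTA
 * mode the reverse.  The attention-bit enable is set in both modes so
 * that slowpath attentions are always delivered.
 */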
static void bnx2x_enable_int(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);
}

static void bnx2x_disable_int(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_disable_int_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_disable_int(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path code */

/*
 * general service functions
 */

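/* Acknowledge a status block index to the IGU with a single 32 bit
 * write of a packed igu_ack_register: the status block id, the storm,
 * the new index, whether to update it and the interrupt mode to set
 * (e.g. enable/disable) are all encoded in one dword.
 */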
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

/*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

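/* The last entry of each RCQ page is a "next page" link rather than a
 * real CQE, so when the hardware index lands on such an entry it must
 * be advanced past it before being compared with the software index.
 */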
static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;

        if ((rx_cons_sb != fp->rx_comp_cons) ||
            (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
                return 1;

        return 0;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

/*      DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = tx_buf->first_bd;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("bad nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        BUG_TRAP(skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return bd_idx;
}

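/* TX BD ring accounting: prod and cons are free-running indices that
 * also count the link BD reserved at the end of each of the
 * NUM_TX_RINGS pages, so the page-count terms (x / TX_DESC_CNT)
 * compensate for the link BDs that lie between cons and prod.
 */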
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d cid %d got ramrod #%d state is %x type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
                   cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
                          command, bp->state);
        }

        mb(); /* force bnx2x_wait_ramrod to see the change */
}

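/* Allocate and map a fresh skb for the RX BD at 'index': only
 * rx_buf_use_size bytes of the rx_buf_size allocation are mapped for
 * DMA, and the 64 bit bus address is split into the BD's hi/lo words.
 */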
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

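/* RX completion loop: walk the RCQ from the software index up to the
 * index reported in the status block.  Slowpath (ramrod) CQEs are
 * handed to bnx2x_sp_event(); for fastpath CQEs the packet is either
 * copied into a new skb (small packets when mtu > 1500, since there
 * is no jumbo ring), passed up with a replacement skb posted to the
 * ring, or recycled on error/alloc failure.  The new RCQ producer is
 * finally written to the TSTORM.
 */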
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
           fp->index, hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                unsigned int len, pad;
                struct sw_rx_bd *rx_buf;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];

                DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
                   " comp_ring (%u) bd_ring (%u,%u)\n",
                   hw_comp_cons, sw_comp_cons,
                   comp_ring_cons, bd_prod, bd_cons);
                DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
                   " queue %x vlan %x len %x\n",
                   cqe->fast_path_cqe.type,
                   cqe->fast_path_cqe.error_type_flags,
                   cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

                /* is this a slowpath msg? */
                if (unlikely(cqe->fast_path_cqe.type)) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;

                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                        pad + RX_COPY_THRESH,
                                        PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe->fast_path_cqe.error_type_flags &
                                     ETH_RX_ERROR_FALGS)) {
                        /* do we sometimes forward error packets anyway? */
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags(%u) Rx packet(%u)\n",
                                   cqe->fast_path_cqe.error_type_flags,
                                   sw_comp_cons);
                                /* TBD make sure MC counts this as a drop */
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        /* TBD count this as a drop? */
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                        bp->rx_buf_use_size,
                                        PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                        /* TBD do we pass bad csum packets in promisc */
                }

#ifdef BCM_VLAN
                if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
                     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
                    && (bp->vlgrp != NULL))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
                rx_pkt++;

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

        mmiowb(); /* keep prod updates ordered */

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = fp->index;

        DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
        bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
        return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);

        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }

        DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is shared and is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        if (status & 0x2) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~0x2;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status & 0x1)) {

                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
           status);

        return IRQ_HANDLED;
}

/* end of fast path */

/* PHY/MAC */

/*
 * General service functions
 */

static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
               ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
                SHARED_HW_CFG_LED_MODE_SHIFT));
        NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

        /* Set blinking rate to ~15.9Hz */
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
               LED_BLINK_RATE_VAL);
        NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

        /* On Ax chip versions for speeds less than 10G
           LED scheme is different */
        if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
                NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
                NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
                NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
        }
}

static void bnx2x_leds_unset(struct bnx2x *bp)
{
        int port = bp->port;

        NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
        NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}

static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val |= bits;
        REG_WR(bp, reg, val);
        return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
        u32 val = REG_RD(bp, reg);

        val &= ~bits;
        REG_WR(bp, reg, val);
        return val;
}

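/* Hardware resource lock backed by the per-function
 * MISC_REG_DRIVER_CONTROL_1 registers: writing the resource bit to
 * the set register (offset +4) attempts to take the lock and a
 * read-back confirms ownership.  Acquisition is retried every 5ms for
 * up to one second.
 */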
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 cnt;
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 1 second every 5ms */
        for (cnt = 0; cnt < 200; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
                       resource_bit);
                lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 func = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
        return 0;
}

static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

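/* Clause 22 MDIO access through the EMAC: phy address, register and
 * data are packed into EMAC_REG_EMAC_MDIO_COMM together with
 * START_BUSY, which is then polled until the EMAC clears it.  The
 * hardware auto-poll is temporarily switched off around the access
 * when it is active.
 */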
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 tmp;
        int i, rc;

/*      DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
           bp->phy_addr, reg, val); */

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        tmp = ((bp->phy_addr << 21) | (reg << 16) |
               (val & EMAC_MDIO_COMM_DATA) |
               EMAC_MDIO_COMM_COMMAND_WRITE_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;
        } else {
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
        }

        return rc;
}

static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
        int port = bp->port;
        u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
        u32 val;
        int i, rc;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val &= ~EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
                REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                udelay(40);
        }

        val = ((bp->phy_addr << 21) | (reg << 16) |
               EMAC_MDIO_COMM_COMMAND_READ_22 |
               EMAC_MDIO_COMM_START_BUSY);
        EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        val &= EMAC_MDIO_COMM_DATA;
                        break;
                }
        }

        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0x0;
                rc = -EBUSY;
        } else {
                *ret_val = val;
                rc = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {

                val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
                val |= EMAC_MDIO_MODE_AUTO_POLL;
                EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
        }

/*      DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
           bp->phy_addr, reg, *ret_val); */

        return rc;
}

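/* Clause 45 MDIO is a two-phase protocol: an ADDRESS cycle first
 * latches the register address within the device ('reg' here carries
 * the MMD/devad and 'addr' the register address), then a separate
 * WRITE_45/READ_45 cycle moves the data.  The MDIO clock is slowed to
 * 2.5MHz for the access and restored to 6.25MHz afterwards.
 */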
static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
                                   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
        u32 tmp;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        tmp = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

        for (i = 0; i < 50; i++) {
                udelay(10);

                tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("write phy register failed\n");

                rc = -EBUSY;

        } else {
                /* data */
                tmp = ((phy_addr << 21) | (reg << 16) | val |
                       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
                                udelay(5);
                                break;
                        }
                }

                if (tmp & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("write phy register failed\n");

                        rc = -EBUSY;
                }
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
         */
        tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                tmp |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

        return rc;
}

static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
                              u32 addr, u32 val)
{
        u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
                                       reg, addr, val);
}

static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
                                  u32 phy_addr, u32 reg, u32 addr,
                                  u32 *ret_val)
{
        u32 val;
        int i, rc = 0;

        /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
         * (a value of 49==0x31) and make sure that the AUTO poll is off
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (EMAC_MDIO_MODE_CLAUSE_45 |
                (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
        REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        udelay(40);

        /* address */
        val = ((phy_addr << 21) | (reg << 16) | addr |
               EMAC_MDIO_COMM_COMMAND_ADDRESS |
               EMAC_MDIO_COMM_START_BUSY);
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }
        if (val & EMAC_MDIO_COMM_START_BUSY) {
                BNX2X_ERR("read phy register failed\n");

                *ret_val = 0;
                rc = -EBUSY;

        } else {
                /* data */
                val = ((phy_addr << 21) | (reg << 16) |
                       EMAC_MDIO_COMM_COMMAND_READ_45 |
                       EMAC_MDIO_COMM_START_BUSY);
                REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

                for (i = 0; i < 50; i++) {
                        udelay(10);

                        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
                        if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
                                val &= EMAC_MDIO_COMM_DATA;
                                break;
                        }
                }

                if (val & EMAC_MDIO_COMM_START_BUSY) {
                        BNX2X_ERR("read phy register failed\n");

                        val = 0;
                        rc = -EBUSY;
                }

                *ret_val = val;
        }

        /* unset clause 45 mode, set the MDIO clock to a faster value
         * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
         */
        val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
        val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
        val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
                val |= EMAC_MDIO_MODE_AUTO_POLL;
        REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

        return rc;
}

static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
                             u32 addr, u32 *ret_val)
{
        u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

        return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
                                      reg, addr, ret_val);
}

static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
                               u32 addr, u32 val)
{
        int i;
        u32 rd_val;

        might_sleep();
        for (i = 0; i < 10; i++) {
                bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
                msleep(5);
                bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
                /* if the read value is not the same as the value we wrote,
                   we should write it again */
                if (rd_val == val)
                        return 0;
        }
        BNX2X_ERR("MDIO write in CL45 failed\n");
        return -EBUSY;
}

/*
 * link management
 */

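/* Resolve flow control from the 4 bit pause_result: bits 3:2 are the
 * local {asym, pause} advertisement and bits 1:0 the link partner's,
 * as annotated on the cases below, following the IEEE 802.3 pause
 * resolution rules.
 */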
static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
{
        switch (pause_result) {         /* ASYM P ASYM P */
        case 0xb:                       /*   1  0   1  1 */
                bp->flow_ctrl = FLOW_CTRL_TX;
                break;

        case 0xe:                       /*   1  1   1  0 */
                bp->flow_ctrl = FLOW_CTRL_RX;
                break;

        case 0x5:                       /*   0  1   0  1 */
        case 0x7:                       /*   0  1   1  1 */
        case 0xd:                       /*   1  1   0  1 */
        case 0xf:                       /*   1  1   1  1 */
                bp->flow_ctrl = FLOW_CTRL_BOTH;
                break;

        default:
                break;
        }
}

static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x *bp)
{
        u32 ext_phy_addr;
        u32 ld_pause;    /* local */
        u32 lp_pause;    /* link partner */
        u32 an_complete; /* AN complete */
        u32 pause_result;
        u8 ret = 0;

        ext_phy_addr = ((bp->ext_phy_config &
                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
                        PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

        /* read twice */
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);
        bnx2x_mdio45_read(bp, ext_phy_addr,
                          EXT_PHY_KR_AUTO_NEG_DEVAD,
                          EXT_PHY_KR_STATUS, &an_complete);

        if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
                ret = 1;
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
                bnx2x_mdio45_read(bp, ext_phy_addr,
                                  EXT_PHY_KR_AUTO_NEG_DEVAD,
                                  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
                pause_result = (ld_pause &
                                EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
                pause_result |= (lp_pause &
                                 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
                DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
                   pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        }
        return ret;
}

static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
        u32 ld_pause;   /* local driver */
        u32 lp_pause;   /* link partner */
        u32 pause_result;

        bp->flow_ctrl = 0;

        /* resolve from gp_status in case of AN complete and not sgmii */
        if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
            (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
            (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
            (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

                MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
                bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
                                  &ld_pause);
                bnx2x_mdio22_read(bp,
                        MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
                                  &lp_pause);
                pause_result = (ld_pause &
                                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
                pause_result |= (lp_pause &
                                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
                DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
                bnx2x_pause_resolve(bp, pause_result);
        } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
                   !(bnx2x_ext_phy_resolve_fc(bp))) {
                /* forced speed */
                if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_TX:
                                bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_RX:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_RX;
                                break;

                        case FLOW_CTRL_BOTH:
                                if (bp->dev->mtu <= 4500)
                                        bp->flow_ctrl = FLOW_CTRL_BOTH;
                                else
                                        bp->flow_ctrl = FLOW_CTRL_TX;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                } else { /* forced mode */
                        switch (bp->req_flow_ctrl) {
                        case FLOW_CTRL_AUTO:
                                DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
                                   " req_autoneg 0x%x\n",
                                   bp->req_flow_ctrl, bp->req_autoneg);
                                break;

                        case FLOW_CTRL_TX:
                        case FLOW_CTRL_RX:
                        case FLOW_CTRL_BOTH:
                                bp->flow_ctrl = bp->req_flow_ctrl;
                                break;

                        case FLOW_CTRL_NONE:
                        default:
                                break;
                        }
                }
        }
        DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}

static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
        bp->link_status = 0;

        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
                DP(NETIF_MSG_LINK, "phy link up\n");

                bp->phy_link_up = 1;
                bp->link_status |= LINK_STATUS_LINK_UP;

                if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
                        bp->duplex = DUPLEX_FULL;
                else
                        bp->duplex = DUPLEX_HALF;

                bnx2x_flow_ctrl_resolve(bp, gp_status);

                switch (gp_status & GP_STATUS_SPEED_MASK) {
                case GP_STATUS_10M:
                        bp->line_speed = SPEED_10;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_10TFD;
                        else
                                bp->link_status |= LINK_10THD;
                        break;

                case GP_STATUS_100M:
                        bp->line_speed = SPEED_100;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_100TXFD;
                        else
                                bp->link_status |= LINK_100TXHD;
                        break;

                case GP_STATUS_1G:
                case GP_STATUS_1G_KX:
                        bp->line_speed = SPEED_1000;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_1000TFD;
                        else
                                bp->link_status |= LINK_1000THD;
                        break;

                case GP_STATUS_2_5G:
                        bp->line_speed = SPEED_2500;
                        if (bp->duplex == DUPLEX_FULL)
                                bp->link_status |= LINK_2500TFD;
                        else
                                bp->link_status |= LINK_2500THD;
                        break;

                case GP_STATUS_5G:
                case GP_STATUS_6G:
                        BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
                                  gp_status);
                        break;

                case GP_STATUS_10G_KX4:
                case GP_STATUS_10G_HIG:
                case GP_STATUS_10G_CX4:
                        bp->line_speed = SPEED_10000;
                        bp->link_status |= LINK_10GTFD;
                        break;

                case GP_STATUS_12G_HIG:
                        bp->line_speed = SPEED_12000;
                        bp->link_status |= LINK_12GTFD;
                        break;

                case GP_STATUS_12_5G:
                        bp->line_speed = SPEED_12500;
                        bp->link_status |= LINK_12_5GTFD;
                        break;

                case GP_STATUS_13G:
                        bp->line_speed = SPEED_13000;
                        bp->link_status |= LINK_13GTFD;
                        break;

                case GP_STATUS_15G:
                        bp->line_speed = SPEED_15000;
                        bp->link_status |= LINK_15GTFD;
                        break;

                case GP_STATUS_16G:
                        bp->line_speed = SPEED_16000;
                        bp->link_status |= LINK_16GTFD;
                        break;

                default:
                        BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
                                  gp_status);
                        break;
                }

                bp->link_status |= LINK_STATUS_SERDES_LINK;

                if (bp->req_autoneg & AUTONEG_SPEED) {
                        bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

                        if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
                                bp->link_status |=
                                        LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

                        if (bp->autoneg & AUTONEG_PARALLEL)
                                bp->link_status |=
                                        LINK_STATUS_PARALLEL_DETECTION_USED;
                }

                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

                if (bp->flow_ctrl & FLOW_CTRL_RX)
                        bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

        } else { /* link_down */
                DP(NETIF_MSG_LINK, "phy link down\n");

                bp->phy_link_up = 0;

                bp->line_speed = 0;
                bp->duplex = DUPLEX_FULL;
                bp->flow_ctrl = 0;
        }

        DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
           DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
                    " link_status 0x%x\n",
           gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
           bp->flow_ctrl, bp->link_status);
}

1926static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1927{
1928 int port = bp->port;
1929
1930 /* first reset all status
c14423fe 1931 * we assume only one line will be changed at a time */
a2fbb9ea 1932 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1933 (NIG_STATUS_XGXS0_LINK10G |
1934 NIG_STATUS_XGXS0_LINK_STATUS |
1935 NIG_STATUS_SERDES0_LINK_STATUS));
1936 if (bp->phy_link_up) {
1937 if (is_10g) {
1938 /* Disable the 10G link interrupt
1939 * by writing 1 to the status register
1940 */
f1410647 1941 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
1942 bnx2x_bits_en(bp,
1943 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1944 NIG_STATUS_XGXS0_LINK10G);
1945
1946 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1947 /* Disable the link interrupt
1948 * by writing 1 to the relevant lane
1949 * in the status register
1950 */
f1410647 1951 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
1952 bnx2x_bits_en(bp,
1953 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1954 ((1 << bp->ser_lane) <<
f1410647 1955 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
1956
1957 } else { /* SerDes */
f1410647 1958 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
1959 /* Disable the link interrupt
1960 * by writing 1 to the status register
1961 */
1962 bnx2x_bits_en(bp,
1963 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
f1410647 1964 NIG_STATUS_SERDES0_LINK_STATUS);
1965 }
1966
1967 } else { /* link_down */
1968 }
1969}
1970
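/* Return 1 when the external PHY reports link, or unconditionally for
 * the "Direct" types that have no external PHY.  The LASI status
 * registers are read twice on purpose: they appear to be
 * latched-on-change, so the first read clears the stale event and the
 * second returns the current state.
 */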
1971static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1972{
1973 u32 ext_phy_type;
1974 u32 ext_phy_addr;
f1410647 1975 u32 val1 = 0, val2;
1976 u32 rx_sd, pcs_status;
1977
1978 if (bp->phy_flags & PHY_XGXS_FLAG) {
1979 ext_phy_addr = ((bp->ext_phy_config &
1980 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1981 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1982
1983 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1984 switch (ext_phy_type) {
1985 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1986 DP(NETIF_MSG_LINK, "XGXS Direct\n");
f1410647 1987 val1 = 1;
1988 break;
1989
1990 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1991 DP(NETIF_MSG_LINK, "XGXS 8705\n");
1992 bnx2x_mdio45_read(bp, ext_phy_addr,
1993 EXT_PHY_OPT_WIS_DEVAD,
1994 EXT_PHY_OPT_LASI_STATUS, &val1);
1995 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
1996
1997 bnx2x_mdio45_read(bp, ext_phy_addr,
1998 EXT_PHY_OPT_WIS_DEVAD,
1999 EXT_PHY_OPT_LASI_STATUS, &val1);
2000 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2001
2002 bnx2x_mdio45_read(bp, ext_phy_addr,
2003 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2004 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
2005 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2006 val1 = (rx_sd & 0x1);
2007 break;
2008
2009 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2010 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2011 bnx2x_mdio45_read(bp, ext_phy_addr,
2012 EXT_PHY_OPT_PMA_PMD_DEVAD,
2013 EXT_PHY_OPT_LASI_STATUS, &val1);
2014 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2015
2016 bnx2x_mdio45_read(bp, ext_phy_addr,
2017 EXT_PHY_OPT_PMA_PMD_DEVAD,
2018 EXT_PHY_OPT_LASI_STATUS, &val1);
2019 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2020
2021 bnx2x_mdio45_read(bp, ext_phy_addr,
2022 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 2023 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
2024 bnx2x_mdio45_read(bp, ext_phy_addr,
2025 EXT_PHY_OPT_PCS_DEVAD,
2026 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2027 bnx2x_mdio45_read(bp, ext_phy_addr,
2028 EXT_PHY_AUTO_NEG_DEVAD,
2029 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2030
a2fbb9ea 2031 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
2032 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
2033 rx_sd, pcs_status, val2, (val2 & (1<<1)));
2034 /* link is up if both bit 0 of pmd_rx_sd and
2035 * bit 0 of pcs_status are set, or if the autoneg
2036 * bit 1 is set
2037 */
2038 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2039 break;
2040
2041 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2042 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2043
2044 /* clear the interrupt LASI status register */
2045 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2046 ext_phy_addr,
2047 EXT_PHY_KR_PCS_DEVAD,
2048 EXT_PHY_KR_LASI_STATUS, &val2);
2049 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2050 ext_phy_addr,
2051 EXT_PHY_KR_PCS_DEVAD,
2052 EXT_PHY_KR_LASI_STATUS, &val1);
2053 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2054 val2, val1);
2055 /* Check the LASI */
2056 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2057 ext_phy_addr,
2058 EXT_PHY_KR_PMA_PMD_DEVAD,
2059 0x9003, &val2);
2060 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2061 ext_phy_addr,
2062 EXT_PHY_KR_PMA_PMD_DEVAD,
2063 0x9003, &val1);
2064 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2065 val2, val1);
2066 /* Check the link status */
2067 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2068 ext_phy_addr,
2069 EXT_PHY_KR_PCS_DEVAD,
2070 EXT_PHY_KR_PCS_STATUS, &val2);
2071 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2072 /* Check the link status on 1.1.2 */
2073 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2074 ext_phy_addr,
2075 EXT_PHY_OPT_PMA_PMD_DEVAD,
2076 EXT_PHY_KR_STATUS, &val2);
2077 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2078 ext_phy_addr,
2079 EXT_PHY_OPT_PMA_PMD_DEVAD,
2080 EXT_PHY_KR_STATUS, &val1);
2081 DP(NETIF_MSG_LINK,
2082 "KR PMA status 0x%x->0x%x\n", val2, val1);
2083 val1 = ((val1 & 4) == 4);
2084 /* If 1G was requested assume the link is up */
2085 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2086 (bp->req_line_speed == SPEED_1000))
2087 val1 = 1;
2088 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2089 break;
2090
2091 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2092 bnx2x_mdio45_read(bp, ext_phy_addr,
2093 EXT_PHY_OPT_PMA_PMD_DEVAD,
2094 EXT_PHY_OPT_LASI_STATUS, &val2);
2095 bnx2x_mdio45_read(bp, ext_phy_addr,
2096 EXT_PHY_OPT_PMA_PMD_DEVAD,
2097 EXT_PHY_OPT_LASI_STATUS, &val1);
2098 DP(NETIF_MSG_LINK,
2099 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2100 bnx2x_mdio45_read(bp, ext_phy_addr,
2101 EXT_PHY_OPT_PMA_PMD_DEVAD,
2102 EXT_PHY_KR_STATUS, &val2);
2103 bnx2x_mdio45_read(bp, ext_phy_addr,
2104 EXT_PHY_OPT_PMA_PMD_DEVAD,
2105 EXT_PHY_KR_STATUS, &val1);
2106 DP(NETIF_MSG_LINK,
2107 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2108 val1 = ((val1 & 4) == 4);
2109 /* if link is up
2110 * print the AN outcome of the SFX7101 PHY
a2fbb9ea 2111 */
2112 if (val1) {
2113 bnx2x_mdio45_read(bp, ext_phy_addr,
2114 EXT_PHY_KR_AUTO_NEG_DEVAD,
2115 0x21, &val2);
2116 DP(NETIF_MSG_LINK,
2117 "SFX7101 AN status 0x%x->%s\n", val2,
2118 (val2 & (1<<14)) ? "Master" : "Slave");
2119 }
2120 break;
2121
2122 default:
2123 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2124 bp->ext_phy_config);
f1410647 2125 val1 = 0;
2126 break;
2127 }
2128
2129 } else { /* SerDes */
2130 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2131 switch (ext_phy_type) {
2132 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2133 DP(NETIF_MSG_LINK, "SerDes Direct\n");
f1410647 2134 val1 = 1;
2135 break;
2136
2137 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2138 DP(NETIF_MSG_LINK, "SerDes 5482\n");
f1410647 2139 val1 = 1;
2140 break;
2141
2142 default:
2143 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2144 bp->ext_phy_config);
f1410647 2145 val1 = 0;
2146 break;
2147 }
2148 }
2149
f1410647 2150 return val1;
2151}
2152
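/* Bring the BigMAC out of reset and program it for a 10G+ link (with
 * optional loopback).  BigMAC registers are 64 bits wide, so every
 * value is written as a two-word wb_write[] pair via REG_WR_DMAE().
 * As a worked example, a (hypothetical) MAC address 00:10:18:aa:bb:cc
 * would pack into the TX source address pair below as:
 *
 *	wb_write[0] = 0x18aabbcc;	(dev_addr bytes 2..5)
 *	wb_write[1] = 0x00000010;	(dev_addr bytes 0..1)
 */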
2153static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
2154{
2155 int port = bp->port;
2156 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2157 NIG_REG_INGRESS_BMAC0_MEM;
2158 u32 wb_write[2];
2159 u32 val;
2160
c14423fe 2161 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
2162 /* reset and unreset the BigMac */
2163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2164 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2165 msleep(5);
2166 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2167 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2168
2169 /* enable access for bmac registers */
2170 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2171
2172 /* XGXS control */
2173 wb_write[0] = 0x3c;
2174 wb_write[1] = 0;
2175 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
2176 wb_write, 2);
2177
2178 /* tx MAC SA */
2179 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
2180 (bp->dev->dev_addr[3] << 16) |
2181 (bp->dev->dev_addr[4] << 8) |
2182 bp->dev->dev_addr[5]);
2183 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
2184 bp->dev->dev_addr[1]);
2185 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
2186 wb_write, 2);
2187
2188 /* tx control */
2189 val = 0xc0;
2190 if (bp->flow_ctrl & FLOW_CTRL_TX)
2191 val |= 0x800000;
2192 wb_write[0] = val;
2193 wb_write[1] = 0;
2194 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
2195
2196 /* set tx mtu */
2197 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
2198 wb_write[1] = 0;
2199 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
2200
2201 /* mac control */
2202 val = 0x3;
2203 if (is_lb) {
2204 val |= 0x4;
2205 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
2206 }
2207 wb_write[0] = val;
2208 wb_write[1] = 0;
2209 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2210 wb_write, 2);
2211
2212 /* rx control set to don't strip crc */
2213 val = 0x14;
2214 if (bp->flow_ctrl & FLOW_CTRL_RX)
2215 val |= 0x20;
2216 wb_write[0] = val;
2217 wb_write[1] = 0;
2218 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
2219
2220 /* set rx mtu */
2221 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
2222 wb_write[1] = 0;
2223 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
2224
2225 /* set cnt max size */
2226 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
2227 wb_write[1] = 0;
2228 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
2229 wb_write, 2);
2230
2231 /* configure safc */
2232 wb_write[0] = 0x1000200;
2233 wb_write[1] = 0;
2234 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2235 wb_write, 2);
2236
2237 /* fix for emulation */
2238 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2239 wb_write[0] = 0xf000;
2240 wb_write[1] = 0;
2241 REG_WR_DMAE(bp,
2242 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
2243 wb_write, 2);
2244 }
2245
2246 /* reset old bmac stats */
2247 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2248
2249 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
2250
2251 /* select XGXS */
2252 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
2253 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
2254
2255 /* disable the NIG in/out to the emac */
2256 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
2257 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
2258 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
2259
2260 /* enable the NIG in/out to the bmac */
2261 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
2262
2263 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
2264 val = 0;
2265 if (bp->flow_ctrl & FLOW_CTRL_TX)
2266 val = 1;
2267 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
2268 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
2269
2270 bp->phy_flags |= PHY_BMAC_FLAG;
2271
2272 bp->stats_state = STATS_STATE_ENABLE;
2273}
2274
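/* Stop BigMAC reception by clearing BMAC_CONTROL_RX_ENABLE with a
 * read-modify-write of the 64-bit control register.  This is a no-op
 * while the BigMAC block is still held in reset.
 */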
2275static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2276{
2277 int port = bp->port;
2278 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2279 NIG_REG_INGRESS_BMAC0_MEM;
2280 u32 wb_write[2];
2281
2282 /* Only if the bmac is out of reset */
2283 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2284 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2285 /* Clear Rx Enable bit in BMAC_CONTROL register */
2286#ifdef BNX2X_DMAE_RD
2287 bnx2x_read_dmae(bp, bmac_addr +
2288 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2289 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2290 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2291#else
2292 wb_write[0] = REG_RD(bp,
2293 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2294 wb_write[1] = REG_RD(bp,
2295 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2296#endif
2297 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2298 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2299 wb_write, 2);
2300 msleep(1);
2301 }
2302}
2303
2304static void bnx2x_emac_enable(struct bnx2x *bp)
2305{
2306 int port = bp->port;
2307 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2308 u32 val;
2309 int timeout;
2310
c14423fe 2311 DP(NETIF_MSG_LINK, "enabling EMAC\n");
2312 /* reset and unreset the emac core */
2313 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2314 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2315 msleep(5);
2316 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2317 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2318
2319 /* enable emac and not bmac */
2320 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2321
2322 /* for palladium */
2323 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2324 /* Use lane 1 (of lanes 0-3) */
2325 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2326 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2327 }
2328 /* for fpga */
2329 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2330 /* Use lane 1 (of lanes 0-3) */
2331 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2332 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2333 }
2334 /* ASIC */
2335 else {
2336 if (bp->phy_flags & PHY_XGXS_FLAG) {
2337 DP(NETIF_MSG_LINK, "XGXS\n");
2338 /* select the master lanes (out of 0-3) */
2339 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2340 bp->ser_lane);
2341 /* select XGXS */
2342 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2343
2344 } else { /* SerDes */
2345 DP(NETIF_MSG_LINK, "SerDes\n");
2346 /* select SerDes */
2347 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2348 }
2349 }
2350
2351 /* enable emac */
2352 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2353
2354 /* init emac - use read-modify-write */
2355 /* self clear reset */
2356 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2357 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2358
2359 timeout = 200;
2360 do {
2361 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2362 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2363 if (!timeout) {
2364 BNX2X_ERR("EMAC timeout!\n");
2365 break;
2366 }
2367 timeout--;
2368 } while (val & EMAC_MODE_RESET);
2369
2370 /* reset tx part */
2371 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2372
2373 timeout = 200;
2374 do {
2375 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2376 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2377 if (!timeout) {
2378 BNX2X_ERR("EMAC timeout!\n");
2379 break;
2380 }
2381 timeout--;
2382 } while (val & EMAC_TX_MODE_RESET);
2383
2384 if (CHIP_REV_IS_SLOW(bp)) {
2385 /* config GMII mode */
2386 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2387 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2388
2389 } else { /* ASIC */
2390 /* pause enable/disable */
2391 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2392 EMAC_RX_MODE_FLOW_EN);
2393 if (bp->flow_ctrl & FLOW_CTRL_RX)
2394 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2395 EMAC_RX_MODE_FLOW_EN);
2396
2397 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2398 EMAC_TX_MODE_EXT_PAUSE_EN);
2399 if (bp->flow_ctrl & FLOW_CTRL_TX)
2400 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2401 EMAC_TX_MODE_EXT_PAUSE_EN);
2402 }
2403
c14423fe 2404 /* KEEP_VLAN_TAG, promiscuous */
2405 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2406 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2407 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2408
2409 /* identify magic packets */
2410 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2411 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2412
2413 /* enable emac for jumbo packets */
2414 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2415 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2416 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2417
2418 /* strip CRC */
2419 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
2420
2421 val = ((bp->dev->dev_addr[0] << 8) |
2422 bp->dev->dev_addr[1]);
2423 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2424
2425 val = ((bp->dev->dev_addr[2] << 24) |
2426 (bp->dev->dev_addr[3] << 16) |
2427 (bp->dev->dev_addr[4] << 8) |
2428 bp->dev->dev_addr[5]);
2429 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2430
2431 /* disable the NIG in/out to the bmac */
2432 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2433 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2434 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2435
2436 /* enable the NIG in/out to the emac */
2437 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2438 val = 0;
2439 if (bp->flow_ctrl & FLOW_CTRL_TX)
2440 val = 1;
2441 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2442 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2443
2444 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2445 /* take the BigMac out of reset */
2446 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2447 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2448
2449 /* enable access for bmac registers */
2450 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2451 }
2452
2453 bp->phy_flags |= PHY_EMAC_FLAG;
2454
2455 bp->stats_state = STATS_STATE_ENABLE;
2456}
2457
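/* Program the EMAC port mode for the resolved speed and duplex.  The
 * EMAC only covers 10M through 2.5G; 10G and above runs on the BigMAC
 * (see bnx2x_bmac_enable), which is why SPEED_10000 is rejected here.
 */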
2458static void bnx2x_emac_program(struct bnx2x *bp)
2459{
2460 u16 mode = 0;
2461 int port = bp->port;
2462
2463 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2464 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2465 (EMAC_MODE_25G_MODE |
2466 EMAC_MODE_PORT_MII_10M |
2467 EMAC_MODE_HALF_DUPLEX));
2468 switch (bp->line_speed) {
2469 case SPEED_10:
2470 mode |= EMAC_MODE_PORT_MII_10M;
2471 break;
2472
2473 case SPEED_100:
2474 mode |= EMAC_MODE_PORT_MII;
2475 break;
2476
2477 case SPEED_1000:
2478 mode |= EMAC_MODE_PORT_GMII;
2479 break;
2480
2481 case SPEED_2500:
2482 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2483 break;
2484
2485 default:
2486 /* 10G not valid for EMAC */
2487 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2488 break;
2489 }
2490
2491 if (bp->duplex == DUPLEX_HALF)
2492 mode |= EMAC_MODE_HALF_DUPLEX;
2493 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2494 mode);
2495
2496 bnx2x_leds_set(bp, bp->line_speed);
2497}
2498
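/* Copy the link partner's requested TX pre-emphasis into our TX
 * driver.  The LP_UP2 field sits in bits [10:7] and the TX driver
 * field in bits [15:12]; assuming the masks match those positions,
 * e.g. lp_up2 = 0x0780 (all four bits set) yields a pre-emphasis
 * field of 0xf000 in tx_driver.
 */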
2499static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2500{
2501 u32 lp_up2;
2502 u32 tx_driver;
2503
2504 /* read precomp */
2505 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2506 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2507
2508 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2509 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2510
2511 /* bits [10:7] at lp_up2, positioned at [15:12] */
2512 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2513 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2514 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2515
2516 if ((lp_up2 != 0) &&
2517 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2518 /* replace tx_driver bits [15:12] */
2519 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2520 tx_driver |= lp_up2;
2521 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
2522 }
2523}
2524
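/* Reconfigure the PBF for the new link: stop accepting new tasks,
 * wait for the credit counter to drain back to its initial value,
 * then program the arbitration threshold and initial credit for the
 * resolved speed.  The constants are presumably in 16-byte units,
 * e.g. at 10G:
 *
 *	thresh   = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
 *	init_crd = thresh + 553 - 22;
 */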
2525static void bnx2x_pbf_update(struct bnx2x *bp)
2526{
2527 int port = bp->port;
2528 u32 init_crd, crd;
2529 u32 count = 1000;
2530 u32 pause = 0;
2531
2532 /* disable port */
2533 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2534
2535 /* wait for init credit */
2536 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2537 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2538 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2539
2540 while ((init_crd != crd) && count) {
2541 msleep(5);
2542
2543 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2544 count--;
2545 }
2546 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2547 if (init_crd != crd)
2548 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2549
2550 if (bp->flow_ctrl & FLOW_CTRL_RX)
2551 pause = 1;
2552 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2553 if (pause) {
2554 /* update threshold */
2555 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2556 /* update init credit */
2557 init_crd = 778; /* (800-18-4) */
2558
2559 } else {
2560 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2561
2562 /* update threshold */
2563 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2564 /* update init credit */
2565 switch (bp->line_speed) {
2566 case SPEED_10:
2567 case SPEED_100:
2568 case SPEED_1000:
2569 init_crd = thresh + 55 - 22;
2570 break;
2571
2572 case SPEED_2500:
2573 init_crd = thresh + 138 - 22;
2574 break;
2575
2576 case SPEED_10000:
2577 init_crd = thresh + 553 - 22;
2578 break;
2579
2580 default:
2581 BNX2X_ERR("Invalid line_speed 0x%x\n",
2582 bp->line_speed);
2583 break;
2584 }
2585 }
2586 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2587 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2588 bp->line_speed, init_crd);
2589
2590 /* probe the credit changes */
2591 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2592 msleep(5);
2593 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2594
2595 /* enable port */
2596 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2597}
2598
2599static void bnx2x_update_mng(struct bnx2x *bp)
2600{
2601 if (!nomcp)
f1410647 2602 SHMEM_WR(bp, port_mb[bp->port].link_status,
2603 bp->link_status);
2604}
2605
2606static void bnx2x_link_report(struct bnx2x *bp)
2607{
2608 if (bp->link_up) {
2609 netif_carrier_on(bp->dev);
2610 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2611
2612 printk("%d Mbps ", bp->line_speed);
2613
2614 if (bp->duplex == DUPLEX_FULL)
2615 printk("full duplex");
2616 else
2617 printk("half duplex");
2618
2619 if (bp->flow_ctrl) {
2620 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2621 printk(", receive ");
2622 if (bp->flow_ctrl & FLOW_CTRL_TX)
2623 printk("& transmit ");
2624 } else {
2625 printk(", transmit ");
2626 }
2627 printk("flow control ON");
2628 }
2629 printk("\n");
2630
2631 } else { /* link_down */
2632 netif_carrier_off(bp->dev);
2633 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2634 }
2635}
2636
2637static void bnx2x_link_up(struct bnx2x *bp)
2638{
2639 int port = bp->port;
2640
2641 /* PBF - link up */
2642 bnx2x_pbf_update(bp);
2643
2644 /* disable drain */
2645 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2646
2647 /* update shared memory */
2648 bnx2x_update_mng(bp);
2649
2650 /* indicate link up */
2651 bnx2x_link_report(bp);
2652}
2653
2654static void bnx2x_link_down(struct bnx2x *bp)
2655{
2656 int port = bp->port;
2657
2658 /* notify stats */
2659 if (bp->stats_state != STATS_STATE_DISABLE) {
2660 bp->stats_state = STATS_STATE_STOP;
2661 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2662 }
2663
f1410647 2664 /* indicate no mac active */
2665 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2666
2667 /* update shared memory */
2668 bnx2x_update_mng(bp);
a2fbb9ea 2669
2670 /* activate nig drain */
2671 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2672
2673 /* reset BigMac */
2674 bnx2x_bmac_rx_disable(bp);
2675 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2676 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2677
2678 /* indicate link down */
2679 bnx2x_link_report(bp);
2680}
2681
2682static void bnx2x_init_mac_stats(struct bnx2x *bp);
2683
2684/* This function is called upon link interrupt */
2685static void bnx2x_link_update(struct bnx2x *bp)
2686{
2687 int port = bp->port;
2688 int i;
f1410647 2689 u32 gp_status;
2690 int link_10g;
2691
f1410647 2692 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
a2fbb9ea 2693 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2694 " 10G %x, XGXS_LINK %x\n", port,
2695 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2696 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2697 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2698 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2699 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2700 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2701 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2702 );
2703
2704 might_sleep();
2705 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2706 /* avoid fast toggling */
f1410647 2707 for (i = 0; i < 10; i++) {
2708 msleep(10);
2709 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2710 &gp_status);
2711 }
2712
2713 bnx2x_link_settings_status(bp, gp_status);
2714
2715 /* anything 10 and over uses the bmac */
2716 link_10g = ((bp->line_speed >= SPEED_10000) &&
2717 (bp->line_speed <= SPEED_16000));
2718
2719 bnx2x_link_int_ack(bp, link_10g);
2720
2721 /* link is up only if both local phy and external phy are up */
2722 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2723 if (bp->link_up) {
2724 if (link_10g) {
2725 bnx2x_bmac_enable(bp, 0);
2726 bnx2x_leds_set(bp, SPEED_10000);
2727
2728 } else {
2729 bnx2x_emac_enable(bp);
2730 bnx2x_emac_program(bp);
2731
2732 /* AN complete? */
2733 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2734 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2735 bnx2x_set_sgmii_tx_driver(bp);
2736 }
2737 }
2738 bnx2x_link_up(bp);
2739
2740 } else { /* link down */
2741 bnx2x_leds_unset(bp);
2742 bnx2x_link_down(bp);
2743 }
2744
2745 bnx2x_init_mac_stats(bp);
2746}
2747
2748/*
2749 * Init service functions
2750 */
2751
2752static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2753{
2754 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2755 (bp->phy_addr + bp->ser_lane) : 0;
2756
2757 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2758 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2759}
2760
2761static void bnx2x_set_master_ln(struct bnx2x *bp)
2762{
2763 u32 new_master_ln;
2764
2765 /* set the master_ln for AN */
2766 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2767 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2768 &new_master_ln);
2769 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2770 (new_master_ln | bp->ser_lane));
2771}
2772
2773static void bnx2x_reset_unicore(struct bnx2x *bp)
2774{
2775 u32 mii_control;
2776 int i;
2777
2778 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2779 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2780 /* reset the unicore */
2781 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2782 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2783
2784 /* wait for the reset to self clear */
2785 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2786 udelay(5);
2787
2788 /* the reset erased the previous bank value */
2789 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2790 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2791 &mii_control);
2792
2793 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2794 udelay(5);
2795 return;
2796 }
2797 }
2798
2799 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2800 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2801 bp->phy_addr);
2802}
2803
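/* Program RX/TX lane swapping.  The swap registers hold one lane
 * number per two-bit field, so the identity mapping 0123 encodes as
 * 0b00011011 = 0x1b and needs no swap, while e.g. a fully reversed
 * layout 3210 would encode as 0b11100100 = 0xe4 with the swap-enable
 * bits set.
 */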
2804static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2805{
2806 /* Each two-bit field represents a lane number:
2807 * no swap is 0123 => 0x1b, so there is no need to enable the swap */
2808
2809 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2810 if (bp->rx_lane_swap != 0x1b) {
2811 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2812 (bp->rx_lane_swap |
2813 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2814 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2815 } else {
2816 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2817 }
2818
2819 if (bp->tx_lane_swap != 0x1b) {
2820 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2821 (bp->tx_lane_swap |
2822 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2823 } else {
2824 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2825 }
2826}
2827
2828static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2829{
2830 u32 control2;
2831
2832 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2833 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2834 &control2);
2835
2836 if (bp->autoneg & AUTONEG_PARALLEL) {
2837 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2838 } else {
2839 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2840 }
2841 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2842 control2);
2843
2844 if (bp->phy_flags & PHY_XGXS_FLAG) {
2845 DP(NETIF_MSG_LINK, "XGXS\n");
2846 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2847
2848 bnx2x_mdio22_write(bp,
f1410647 2849 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2850 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2851
2852 bnx2x_mdio22_read(bp,
2853 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2854 &control2);
2855
2856 if (bp->autoneg & AUTONEG_PARALLEL) {
2857 control2 |=
2858 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2859 } else {
2860 control2 &=
2861 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2862 }
2863 bnx2x_mdio22_write(bp,
2864 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2865 control2);
2866
2867 /* Disable parallel detection of HiG */
2868 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2869 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2870 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2871 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2872 }
2873}
2874
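/* Configure the autoneg machinery according to bp->autoneg: CL37 aneg
 * in the combo IEEE block, SGMII/fiber autodetection, Broadcom BAM
 * plus TetonII next-page aneg, and clause 73 aneg (advertising 10G
 * KX4 on XGXS or 1G KX on SerDes).  Each mechanism is explicitly
 * disabled when its flag is not set.
 */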
2875static void bnx2x_set_autoneg(struct bnx2x *bp)
2876{
2877 u32 reg_val;
2878
2879 /* CL37 Autoneg */
2880 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2881 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2882 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2883 (bp->autoneg & AUTONEG_CL37)) {
2884 /* CL37 Autoneg Enabled */
2885 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2886 } else {
2887 /* CL37 Autoneg Disabled */
2888 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2889 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2890 }
2891 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2892
2893 /* Enable/Disable Autodetection */
2894 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2895 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2896 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2897
2898 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2899 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2900 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2901 } else {
2902 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2903 }
2904 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2905
2906 /* Enable TetonII and BAM autoneg */
2907 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2908 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2909 &reg_val);
2910 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2911 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2912 /* Enable BAM aneg Mode and TetonII aneg Mode */
2913 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2914 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2915 } else {
2916 /* TetonII and BAM Autoneg Disabled */
2917 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2918 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2919 }
2920 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2921 reg_val);
2922
2923 /* Enable Clause 73 Aneg */
2924 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2925 (bp->autoneg & AUTONEG_CL73)) {
2926 /* Enable BAM Station Manager */
2927 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2928 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2929 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2930 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2931 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2932
2933 /* Merge CL73 and CL37 aneg resolution */
2934 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2935 &reg_val);
2936 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2937 (reg_val |
2938 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2939
2940 /* Set the CL73 AN speed */
2941 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2942 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2943 /* In the SerDes we support only the 1G.
2944 * In the XGXS we support the 10G KX4
2945 * but we currently do not support the KR */
2946 if (bp->phy_flags & PHY_XGXS_FLAG) {
2947 DP(NETIF_MSG_LINK, "XGXS\n");
2948 /* 10G KX4 */
2949 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2950 } else {
2951 DP(NETIF_MSG_LINK, "SerDes\n");
2952 /* 1000M KX */
2953 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2954 }
2955 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2956
2957 /* CL73 Autoneg Enabled */
2958 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2959 } else {
2960 /* CL73 Autoneg Disabled */
2961 reg_val = 0;
2962 }
2963 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2964 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2965}
2966
2967/* program SerDes, forced speed */
2968static void bnx2x_program_serdes(struct bnx2x *bp)
2969{
2970 u32 reg_val;
2971
2972 /* program duplex, disable autoneg */
2973 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2974 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2975 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2976 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2977 if (bp->req_duplex == DUPLEX_FULL)
2978 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2979 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2980
2981 /* program speed
2982 - needed only if the speed is greater than 1G (2.5G or 10G) */
2983 if (bp->req_line_speed > SPEED_1000) {
2984 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2985 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2986 /* clearing the speed value before setting the right speed */
2987 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2988 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2989 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2990 if (bp->req_line_speed == SPEED_10000)
2991 reg_val |=
2992 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2993 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2994 }
2995}
2996
2997static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2998{
2999 u32 val = 0;
3000
3001 /* configure the 48 bits for BAM AN */
3002 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3003
3004 /* set extended capabilities */
f1410647 3005 if (bp->advertising & ADVERTISED_2500baseX_Full)
3006 val |= MDIO_OVER_1G_UP1_2_5G;
3007 if (bp->advertising & ADVERTISED_10000baseT_Full)
3008 val |= MDIO_OVER_1G_UP1_10G;
3009 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3010
3011 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3012}
3013
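/* Map the requested flow control onto the IEEE pause advertisement
 * bits (PAUSE/ASM_DIR of Table 28B-3, 802.3ab-1999).  With flow
 * control autoneg enabled the result is roughly:
 *
 *	request		mtu <= 4500		mtu > 4500
 *	AUTO, BOTH	PAUSE_BOTH		PAUSE_ASYMMETRIC
 *	TX		PAUSE_ASYMMETRIC	PAUSE_ASYMMETRIC
 *	RX		PAUSE_BOTH		PAUSE_NONE
 *	NONE		PAUSE_NONE		PAUSE_NONE
 *
 * The MTU check presumably avoids advertising symmetric pause when
 * jumbo frames could overrun the RX buffers; the forced mode branch
 * ignores the MTU.
 */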
3014static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3015{
3016 u32 an_adv;
3017
3018 /* for AN, we are always publishing full duplex */
3019 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3020
3021 /* resolve pause mode and advertisement
3022 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3023 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3024 switch (bp->req_flow_ctrl) {
3025 case FLOW_CTRL_AUTO:
3026 if (bp->dev->mtu <= 4500) {
3027 an_adv |=
3028 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3029 bp->advertising |= (ADVERTISED_Pause |
3030 ADVERTISED_Asym_Pause);
3031 } else {
3032 an_adv |=
3033 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3034 bp->advertising |= ADVERTISED_Asym_Pause;
3035 }
3036 break;
3037
3038 case FLOW_CTRL_TX:
3039 an_adv |=
3040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3041 bp->advertising |= ADVERTISED_Asym_Pause;
3042 break;
3043
3044 case FLOW_CTRL_RX:
3045 if (bp->dev->mtu <= 4500) {
3046 an_adv |=
3047 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3048 bp->advertising |= (ADVERTISED_Pause |
3049 ADVERTISED_Asym_Pause);
3050 } else {
3051 an_adv |=
3052 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3053 bp->advertising &= ~(ADVERTISED_Pause |
3054 ADVERTISED_Asym_Pause);
3055 }
3056 break;
3057
3058 case FLOW_CTRL_BOTH:
3059 if (bp->dev->mtu <= 4500) {
3060 an_adv |=
3061 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3062 bp->advertising |= (ADVERTISED_Pause |
3063 ADVERTISED_Asym_Pause);
3064 } else {
3065 an_adv |=
3066 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3067 bp->advertising |= ADVERTISED_Asym_Pause;
3068 }
3069 break;
3070
3071 case FLOW_CTRL_NONE:
3072 default:
3073 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3074 bp->advertising &= ~(ADVERTISED_Pause |
3075 ADVERTISED_Asym_Pause);
3076 break;
3077 }
3078 } else { /* forced mode */
3079 switch (bp->req_flow_ctrl) {
3080 case FLOW_CTRL_AUTO:
3081 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3082 " req_autoneg 0x%x\n",
3083 bp->req_flow_ctrl, bp->req_autoneg);
3084 break;
3085
3086 case FLOW_CTRL_TX:
3087 an_adv |=
3088 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3089 bp->advertising |= ADVERTISED_Asym_Pause;
3090 break;
3091
3092 case FLOW_CTRL_RX:
3093 case FLOW_CTRL_BOTH:
3094 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3095 bp->advertising |= (ADVERTISED_Pause |
3096 ADVERTISED_Asym_Pause);
3097 break;
3098
3099 case FLOW_CTRL_NONE:
3100 default:
3101 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3102 bp->advertising &= ~(ADVERTISED_Pause |
3103 ADVERTISED_Asym_Pause);
3104 break;
3105 }
3106 }
3107
3108 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3109 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3110}
3111
3112static void bnx2x_restart_autoneg(struct bnx2x *bp)
3113{
3114 if (bp->autoneg & AUTONEG_CL73) {
3115 /* enable and restart clause 73 aneg */
3116 u32 an_ctrl;
3117
3118 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3119 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3120 &an_ctrl);
3121 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3122 (an_ctrl |
3123 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3124 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3125
3126 } else {
3127 /* Enable and restart BAM/CL37 aneg */
3128 u32 mii_control;
3129
3130 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3131 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3132 &mii_control);
3133 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3134 (mii_control |
3135 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3136 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3137 }
3138}
3139
3140static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3141{
3142 u32 control1;
3143
3144 /* in SGMII mode, the unicore is always slave */
3145 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3146 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3147 &control1);
3148 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3149 /* set sgmii mode (and not fiber) */
3150 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3151 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3152 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3153 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3154 control1);
3155
3156 /* if forced speed */
3157 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3158 /* set speed, disable autoneg */
3159 u32 mii_control;
3160
3161 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3162 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3163 &mii_control);
3164 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3165 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3166 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3167
3168 switch (bp->req_line_speed) {
3169 case SPEED_100:
3170 mii_control |=
3171 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3172 break;
3173 case SPEED_1000:
3174 mii_control |=
3175 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3176 break;
3177 case SPEED_10:
3178 /* there is nothing to set for 10M */
3179 break;
3180 default:
3181 /* invalid speed for SGMII */
3182 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3183 bp->req_line_speed);
3184 break;
3185 }
3186
3187 /* setting the full duplex */
3188 if (bp->req_duplex == DUPLEX_FULL)
3189 mii_control |=
3190 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3191 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3192 mii_control);
3193
3194 } else { /* AN mode */
3195 /* enable and restart AN */
3196 bnx2x_restart_autoneg(bp);
3197 }
3198}
3199
3200static void bnx2x_link_int_enable(struct bnx2x *bp)
3201{
3202 int port = bp->port;
3203 u32 ext_phy_type;
3204 u32 mask;
3205
3206 /* setting the status to report on link up
3207 for either XGXS or SerDes */
3208 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3209 (NIG_STATUS_XGXS0_LINK10G |
3210 NIG_STATUS_XGXS0_LINK_STATUS |
3211 NIG_STATUS_SERDES0_LINK_STATUS));
3212
3213 if (bp->phy_flags & PHY_XGXS_FLAG) {
3214 mask = (NIG_MASK_XGXS0_LINK10G |
3215 NIG_MASK_XGXS0_LINK_STATUS);
3216 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3217 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3218 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3219 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3220 (ext_phy_type !=
3221 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3222 mask |= NIG_MASK_MI_INT;
3223 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3224 }
3225
3226 } else { /* SerDes */
3227 mask = NIG_MASK_SERDES0_LINK_STATUS;
3228 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3229 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3230 if ((ext_phy_type !=
3231 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3232 (ext_phy_type !=
3233 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3234 mask |= NIG_MASK_MI_INT;
3235 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3236 }
a2fbb9ea 3237 }
3238 bnx2x_bits_en(bp,
3239 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3240 mask);
3241 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3242 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3243 " 10G %x, XGXS_LINK %x\n", port,
3244 (bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
3245 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3246 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3247 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3248 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3249 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3250 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3251 );
3252}
3253
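/* Load the BCM8072 microcode from its external SPI ROM: set
 * ser_boot_ctl in MISC_CTRL1, pulse the internal microprocessor reset
 * to start the download, wait for the SPI transfer to complete, clear
 * ser_boot_ctl again and read back the firmware version words (0xca19
 * and 0xca1a) for the log.
 */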
3254static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3255{
3256 u32 ext_phy_addr = ((bp->ext_phy_config &
3257 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3258 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3259 u32 fw_ver1, fw_ver2;
3260
3261 /* Need to wait 200ms after reset */
3262 msleep(200);
3263 /* Boot port from external ROM
3264 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3265 */
3266 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3267 EXT_PHY_KR_PMA_PMD_DEVAD,
3268 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3269
3270 /* Reset internal microprocessor */
3271 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3272 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3273 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3274 /* set micro reset = 0 */
3275 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3276 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3277 EXT_PHY_KR_ROM_MICRO_RESET);
3278 /* Reset internal microprocessor */
3279 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3280 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3281 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3282 /* wait for 100ms for code download via SPI port */
3283 msleep(100);
3284
3285 /* Clear ser_boot_ctl bit */
3286 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3287 EXT_PHY_KR_PMA_PMD_DEVAD,
3288 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3289 /* Wait 100ms */
3290 msleep(100);
3291
3292 /* Print the PHY FW version */
3293 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3294 EXT_PHY_KR_PMA_PMD_DEVAD,
3295 0xca19, &fw_ver1);
3296 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3297 EXT_PHY_KR_PMA_PMD_DEVAD,
3298 0xca1a, &fw_ver2);
3299 DP(NETIF_MSG_LINK,
3300 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3301}
3302
3303static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3304{
3305 u32 ext_phy_addr = ((bp->ext_phy_config &
3306 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3307 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3308
3309 /* Force KR or KX */
3310 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3311 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3312 0x2040);
3313 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3314 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3315 0x000b);
3316 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3317 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3318 0x0000);
3319 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3320 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3321 0x0000);
3322}
3323
3324static void bnx2x_ext_phy_init(struct bnx2x *bp)
3325{
3326 u32 ext_phy_type;
3327 u32 ext_phy_addr;
3328 u32 cnt;
3329 u32 ctrl;
3330 u32 val = 0;
3331
3332 if (bp->phy_flags & PHY_XGXS_FLAG) {
3333 ext_phy_addr = ((bp->ext_phy_config &
3334 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3335 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3336
3337 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3338 /* Make sure that the soft reset is off (except for the 8072:
3339 * due to the lock, it will be done inside the specific
3340 * handling)
3341 */
3342 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3343 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3344 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3345 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3346 /* Wait for soft reset to get cleared up to 1 sec */
3347 for (cnt = 0; cnt < 1000; cnt++) {
3348 bnx2x_mdio45_read(bp, ext_phy_addr,
3349 EXT_PHY_OPT_PMA_PMD_DEVAD,
3350 EXT_PHY_OPT_CNTL, &ctrl);
3351 if (!(ctrl & (1<<15)))
3352 break;
3353 msleep(1);
3354 }
3355 DP(NETIF_MSG_LINK,
3356 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3357 }
3358
3359 switch (ext_phy_type) {
3360 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3361 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3362 break;
3363
3364 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3365 DP(NETIF_MSG_LINK, "XGXS 8705\n");
a2fbb9ea 3366
3367 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3368 EXT_PHY_OPT_PMA_PMD_DEVAD,
3369 EXT_PHY_OPT_PMD_MISC_CNTL,
3370 0x8288);
3371 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3372 EXT_PHY_OPT_PMA_PMD_DEVAD,
3373 EXT_PHY_OPT_PHY_IDENTIFIER,
3374 0x7fbf);
3375 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3376 EXT_PHY_OPT_PMA_PMD_DEVAD,
3377 EXT_PHY_OPT_CMU_PLL_BYPASS,
3378 0x0100);
3379 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3380 EXT_PHY_OPT_WIS_DEVAD,
3381 EXT_PHY_OPT_LASI_CNTL, 0x1);
3382 break;
3383
3384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3385 DP(NETIF_MSG_LINK, "XGXS 8706\n");
a2fbb9ea 3386
3387 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3388 /* Force speed */
3389 if (bp->req_line_speed == SPEED_10000) {
3390 DP(NETIF_MSG_LINK,
3391 "XGXS 8706 force 10Gbps\n");
3392 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3393 EXT_PHY_OPT_PMA_PMD_DEVAD,
3394 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3395 0x400);
3396 } else {
3397 /* Force 1Gbps */
3398 DP(NETIF_MSG_LINK,
3399 "XGXS 8706 force 1Gbps\n");
3400
3401 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3402 EXT_PHY_OPT_PMA_PMD_DEVAD,
3403 EXT_PHY_OPT_CNTL,
3404 0x0040);
3405
3406 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3407 EXT_PHY_OPT_PMA_PMD_DEVAD,
3408 EXT_PHY_OPT_CNTL2,
3409 0x000D);
3410 }
3411
3412 /* Enable LASI */
3413 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3414 EXT_PHY_OPT_PMA_PMD_DEVAD,
3415 EXT_PHY_OPT_LASI_CNTL,
3416 0x1);
3417 } else {
3418 /* AUTONEG */
3419 /* Allow CL37 through CL73 */
3420 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3421 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3422 EXT_PHY_AUTO_NEG_DEVAD,
3423 EXT_PHY_OPT_AN_CL37_CL73,
3424 0x040c);
3425
3426 /* Enable Full-Duplex advertisement on CL37 */
3427 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3428 EXT_PHY_AUTO_NEG_DEVAD,
3429 EXT_PHY_OPT_AN_CL37_FD,
3430 0x0020);
3431 /* Enable CL37 AN */
3432 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3433 EXT_PHY_AUTO_NEG_DEVAD,
3434 EXT_PHY_OPT_AN_CL37_AN,
3435 0x1000);
3436 /* Advertise 10G/1G support */
3437 if (bp->advertising &
3438 ADVERTISED_1000baseT_Full)
3439 val = (1<<5);
3440 if (bp->advertising &
3441 ADVERTISED_10000baseT_Full)
3442 val |= (1<<7);
3443
3444 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3445 EXT_PHY_AUTO_NEG_DEVAD,
3446 EXT_PHY_OPT_AN_ADV, val);
3447 /* Enable LASI */
3448 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3449 EXT_PHY_OPT_PMA_PMD_DEVAD,
3450 EXT_PHY_OPT_LASI_CNTL,
3451 0x1);
3452
3453 /* Enable clause 73 AN */
3454 bnx2x_mdio45_write(bp, ext_phy_addr,
3455 EXT_PHY_AUTO_NEG_DEVAD,
3456 EXT_PHY_OPT_CNTL,
3457 0x1200);
3458 }
3459 break;
3460
3461 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3462 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3463 /* Wait for soft reset to get cleared up to 1 sec */
3464 for (cnt = 0; cnt < 1000; cnt++) {
3465 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3466 ext_phy_addr,
3467 EXT_PHY_OPT_PMA_PMD_DEVAD,
3468 EXT_PHY_OPT_CNTL, &ctrl);
3469 if (!(ctrl & (1<<15)))
3470 break;
3471 msleep(1);
3472 }
3473 DP(NETIF_MSG_LINK,
3474 "8072 control reg 0x%x (after %d ms)\n",
3475 ctrl, cnt);
3476
3477 bnx2x_bcm8072_external_rom_boot(bp);
3478 DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3479
3480 /* enable LASI */
3481 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3482 ext_phy_addr,
3483 EXT_PHY_KR_PMA_PMD_DEVAD,
3484 0x9000, 0x0400);
3485 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3486 ext_phy_addr,
3487 EXT_PHY_KR_PMA_PMD_DEVAD,
3488 EXT_PHY_KR_LASI_CNTL, 0x0004);
3489
3490 /* If this is forced speed, set to KR or KX
3491 * (all other are not supported)
3492 */
3493 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3494 if (bp->req_line_speed == SPEED_10000) {
3495 bnx2x_bcm8072_force_10G(bp);
3496 DP(NETIF_MSG_LINK,
3497 "Forced speed 10G on 8072\n");
3498 /* unlock */
3499 bnx2x_hw_unlock(bp,
3500 HW_LOCK_RESOURCE_8072_MDIO);
3501 break;
3502 } else
3503 val = (1<<5);
3504 } else {
3505
3506 /* Advertise 10G/1G support */
3507 if (bp->advertising &
3508 ADVERTISED_1000baseT_Full)
3509 val = (1<<5);
3510 if (bp->advertising &
3511 ADVERTISED_10000baseT_Full)
3512 val |= (1<<7);
3513 }
3514 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3515 ext_phy_addr,
3516 EXT_PHY_KR_AUTO_NEG_DEVAD,
3517 0x11, val);
3518 /* Add support for CL37 ( passive mode ) I */
3519 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3520 ext_phy_addr,
3521 EXT_PHY_KR_AUTO_NEG_DEVAD,
3522 0x8370, 0x040c);
3523 /* Add support for CL37 ( passive mode ) II */
3524 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3525 ext_phy_addr,
3526 EXT_PHY_KR_AUTO_NEG_DEVAD,
3527 0xffe4, 0x20);
3528 /* Add support for CL37 ( passive mode ) III */
3529 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3530 ext_phy_addr,
3531 EXT_PHY_KR_AUTO_NEG_DEVAD,
3532 0xffe0, 0x1000);
3533 /* Restart autoneg */
3534 msleep(500);
3535 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3536 ext_phy_addr,
3537 EXT_PHY_KR_AUTO_NEG_DEVAD,
3538 EXT_PHY_KR_CTRL, 0x1200);
3539 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3540 "1G %ssupported 10G %ssupported\n",
3541 (val & (1<<5)) ? "" : "not ",
3542 (val & (1<<7)) ? "" : "not ");
3543
3544 /* unlock */
3545 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3546 break;
3547
3548 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3549 DP(NETIF_MSG_LINK,
3550 "Setting the SFX7101 LASI indication\n");
3551 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3552 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3553 EXT_PHY_OPT_LASI_CNTL, 0x1);
3554 DP(NETIF_MSG_LINK,
3555 "Setting the SFX7101 LED to blink on traffic\n");
3556 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3557 EXT_PHY_OPT_PMA_PMD_DEVAD,
3558 0xC007, (1<<3));
3559
3560 /* read-modify-write pause advertising */
3561 bnx2x_mdio45_read(bp, ext_phy_addr,
3562 EXT_PHY_KR_AUTO_NEG_DEVAD,
3563 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3564 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3565 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3566 if (bp->advertising & ADVERTISED_Pause)
3567 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3568
3569 if (bp->advertising & ADVERTISED_Asym_Pause) {
3570 val |=
3571 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3572 }
3573 DP(NETIF_MSG_LINK, "SFX7101 AN advertising 0x%x\n", val);
3574 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3575 EXT_PHY_KR_AUTO_NEG_DEVAD,
3576 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3577 /* Restart autoneg */
3578 bnx2x_mdio45_read(bp, ext_phy_addr,
3579 EXT_PHY_KR_AUTO_NEG_DEVAD,
3580 EXT_PHY_KR_CTRL, &val);
3581 val |= 0x200;
3582 bnx2x_mdio45_write(bp, ext_phy_addr,
3583 EXT_PHY_KR_AUTO_NEG_DEVAD,
3584 EXT_PHY_KR_CTRL, val);
3585 break;
3586
3587 default:
3588 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3589 bp->ext_phy_config);
3590 break;
3591 }
3592
3593 } else { /* SerDes */
f1410647 3594/* ext_phy_addr = ((bp->ext_phy_config &
3595 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3596 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3597*/
3598 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3599 switch (ext_phy_type) {
3600 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3601 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3602 break;
3603
3604 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3605 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3606 break;
3607
3608 default:
3609 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3610 bp->ext_phy_config);
3611 break;
3612 }
3613 }
3614}
3615
3616static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3617{
3618 u32 ext_phy_type;
3619 u32 ext_phy_addr = ((bp->ext_phy_config &
3620 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3621 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3622 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3623
3624 /* The PHY reset is controlled by GPIO 1
3625 * Give it 1ms of reset pulse
3626 */
3627 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3628 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3629 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3630 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3631 msleep(1);
3632 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3633 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3634 }
3635
3636 if (bp->phy_flags & PHY_XGXS_FLAG) {
3637 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3638 switch (ext_phy_type) {
3639 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3640 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3641 break;
3642
3643 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3644 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3645 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3646 bnx2x_mdio45_write(bp, ext_phy_addr,
3647 EXT_PHY_OPT_PMA_PMD_DEVAD,
a2fbb9ea 3648 EXT_PHY_OPT_CNTL, 0xa040);
3649 break;
3650
3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3652 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3653 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3654 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3655 ext_phy_addr,
3656 EXT_PHY_KR_PMA_PMD_DEVAD,
3657 0, 1<<15);
3658 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3659 break;
3660
3661 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3662 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
3663 break;
3664
3665 default:
3666 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3667 bp->ext_phy_config);
3668 break;
3669 }
3670
3671 } else { /* SerDes */
3672 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3673 switch (ext_phy_type) {
3674 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3675 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3676 break;
3677
3678 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3679 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3680 break;
3681
3682 default:
3683 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3684 bp->ext_phy_config);
3685 break;
3686 }
3687 }
3688}
3689
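/* Bring the link up from scratch: mask link attentions, reset the
 * external PHY and the SerDes/XGXS unicore, restore lane swapping and
 * parallel detection, then either force speed/duplex or set up and
 * restart autoneg (SGMII or CL37/CL73 fiber), and finally init the
 * external PHY and re-enable the link interrupt.
 */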
3690static void bnx2x_link_initialize(struct bnx2x *bp)
3691{
3692 int port = bp->port;
3693
3694 /* disable attentions */
3695 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3696 (NIG_MASK_XGXS0_LINK_STATUS |
3697 NIG_MASK_XGXS0_LINK10G |
3698 NIG_MASK_SERDES0_LINK_STATUS |
3699 NIG_MASK_MI_INT));
3700
f1410647 3701 /* Activate the external PHY */
3702 bnx2x_ext_phy_reset(bp);
3703
3704 bnx2x_set_aer_mmd(bp);
3705
3706 if (bp->phy_flags & PHY_XGXS_FLAG)
3707 bnx2x_set_master_ln(bp);
3708
3709 /* reset the SerDes and wait for reset bit return low */
3710 bnx2x_reset_unicore(bp);
3711
3712 bnx2x_set_aer_mmd(bp);
3713
3714 /* setting the masterLn_def again after the reset */
3715 if (bp->phy_flags & PHY_XGXS_FLAG) {
3716 bnx2x_set_master_ln(bp);
3717 bnx2x_set_swap_lanes(bp);
3718 }
3719
3720 /* Set Parallel Detect */
3721 if (bp->req_autoneg & AUTONEG_SPEED)
3722 bnx2x_set_parallel_detection(bp);
3723
3724 if (bp->phy_flags & PHY_XGXS_FLAG) {
3725 if (bp->req_line_speed &&
3726 bp->req_line_speed < SPEED_1000) {
3727 bp->phy_flags |= PHY_SGMII_FLAG;
3728 } else {
3729 bp->phy_flags &= ~PHY_SGMII_FLAG;
3730 }
3731 }
3732
3733 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3734 u16 bank, rx_eq;
3735
3736 rx_eq = ((bp->serdes_config &
3737 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3738 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3739
3740 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3741 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3742 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3743 MDIO_SET_REG_BANK(bp, bank);
3744 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3745 ((rx_eq &
3746 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3747 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3748 }
3749
3750 /* forced speed requested? */
3751 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3752 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3753
3754 /* disable autoneg */
3755 bnx2x_set_autoneg(bp);
3756
3757 /* program speed and duplex */
3758 bnx2x_program_serdes(bp);
3759
3760 } else { /* AN_mode */
3761 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3762
3763 /* AN enabled */
3764 bnx2x_set_brcm_cl37_advertisment(bp);
3765
3766 /* program duplex & pause advertisement (for aneg) */
3767 bnx2x_set_ieee_aneg_advertisment(bp);
3768
3769 /* enable autoneg */
3770 bnx2x_set_autoneg(bp);
3771
3772 /* enable and restart AN */
3773 bnx2x_restart_autoneg(bp);
3774 }
3775
3776 } else { /* SGMII mode */
3777 DP(NETIF_MSG_LINK, "SGMII\n");
3778
3779 bnx2x_initialize_sgmii_process(bp);
3780 }
3781
3782 /* init ext phy and enable link state int */
3783 bnx2x_ext_phy_init(bp);
3784
3785 /* enable the interrupt */
3786 bnx2x_link_int_enable(bp);
3787}
3788
3789static void bnx2x_phy_deassert(struct bnx2x *bp)
3790{
3791 int port = bp->port;
3792 u32 val;
3793
3794 if (bp->phy_flags & PHY_XGXS_FLAG) {
3795 DP(NETIF_MSG_LINK, "XGXS\n");
3796 val = XGXS_RESET_BITS;
3797
3798 } else { /* SerDes */
3799 DP(NETIF_MSG_LINK, "SerDes\n");
3800 val = SERDES_RESET_BITS;
3801 }
3802
3803 val = val << (port*16);
3804
3805 /* reset and unreset the SerDes/XGXS */
3806 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3807 msleep(5);
3808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3809}
3810
3811static int bnx2x_phy_init(struct bnx2x *bp)
3812{
3813 DP(NETIF_MSG_LINK, "started\n");
3814 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3815 bp->phy_flags |= PHY_EMAC_FLAG;
3816 bp->link_up = 1;
3817 bp->line_speed = SPEED_10000;
3818 bp->duplex = DUPLEX_FULL;
3819 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3820 bnx2x_emac_enable(bp);
3821 bnx2x_link_report(bp);
3822 return 0;
3823
3824 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3825 bp->phy_flags |= PHY_BMAC_FLAG;
3826 bp->link_up = 1;
3827 bp->line_speed = SPEED_10000;
3828 bp->duplex = DUPLEX_FULL;
3829 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3830 bnx2x_bmac_enable(bp, 0);
3831 bnx2x_link_report(bp);
3832 return 0;
3833
3834 } else {
3835 bnx2x_phy_deassert(bp);
3836 bnx2x_link_initialize(bp);
3837 }
3838
3839 return 0;
3840}
3841
3842static void bnx2x_link_reset(struct bnx2x *bp)
3843{
3844 int port = bp->port;
3845 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3846
3847 /* update shared memory */
3848 bp->link_status = 0;
3849 bnx2x_update_mng(bp);
3850
3851 /* disable attentions */
3852 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3853 (NIG_MASK_XGXS0_LINK_STATUS |
3854 NIG_MASK_XGXS0_LINK10G |
3855 NIG_MASK_SERDES0_LINK_STATUS |
3856 NIG_MASK_MI_INT));
3857
3858 /* activate nig drain */
3859 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3860
3861 /* disable nig egress interface */
3862 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3863 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3864
3865 /* Stop BigMac rx */
3866 bnx2x_bmac_rx_disable(bp);
3867
3868 /* disable emac */
3869 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3870
3871 msleep(10);
3872
3873 /* The PHY reset is controlled by GPIO 1
3874 * Hold it as output low
3875 */
3876 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3877 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3878 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3879 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3880 DP(NETIF_MSG_LINK, "reset external PHY\n");
3881 }
3882
3883 /* reset the SerDes/XGXS */
3884 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3885 (0x1ff << (port*16)));
3886
3887 /* reset BigMac */
3888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3889 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3890
3891 /* disable nig ingress interface */
3892 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3893 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3894
3895 /* set link down */
3896 bp->link_up = 0;
3897}
3898
3899#ifdef BNX2X_XGXS_LB
3900static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3901{
3902 int port = bp->port;
3903
3904 if (is_10g) {
3905 u32 md_devad;
3906
3907 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3908
3909 /* change the uni_phy_addr in the nig */
3910 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3911 &md_devad);
3912 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3913
3914 /* change the aer mmd */
3915 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3916 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3917
3918 /* config combo IEEE0 control reg for loopback */
3919 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3920 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3921 0x6041);
3922
3923 /* set aer mmd back */
3924 bnx2x_set_aer_mmd(bp);
3925
3926 /* and md_devad */
3927 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3928
3929 } else {
3930 u32 mii_control;
3931
3932 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3933
3934 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3935 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3936 &mii_control);
3937 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3938 (mii_control |
3939 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3940 }
3941}
3942#endif
3943
3944/* end of PHY/MAC */
3945
3946/* slow path */
3947
3948/*
3949 * General service functions
3950 */
3951
3952/* the slow path queue is odd since completions arrive on the fastpath ring */
3953static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3954 u32 data_hi, u32 data_lo, int common)
3955{
3956 int port = bp->port;
3957
3958 DP(NETIF_MSG_TIMER,
3959 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3960 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3961 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3962 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3963
3964#ifdef BNX2X_STOP_ON_ERROR
3965 if (unlikely(bp->panic))
3966 return -EIO;
3967#endif
3968
3969 spin_lock(&bp->spq_lock);
3970
3971 if (!bp->spq_left) {
3972 BNX2X_ERR("BUG! SPQ ring full!\n");
3973 spin_unlock(&bp->spq_lock);
3974 bnx2x_panic();
3975 return -EBUSY;
3976 }
3977
3978 /* CID needs the port number to be encoded in it */
3979 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3980 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3981 HW_CID(bp, cid)));
3982 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3983 if (common)
3984 bp->spq_prod_bd->hdr.type |=
3985 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3986
3987 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3988 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3989
3990 bp->spq_left--;
3991
3992 if (bp->spq_prod_bd == bp->spq_last_bd) {
3993 bp->spq_prod_bd = bp->spq;
3994 bp->spq_prod_idx = 0;
3995 DP(NETIF_MSG_TIMER, "end of spq\n");
3996
3997 } else {
3998 bp->spq_prod_bd++;
3999 bp->spq_prod_idx++;
4000 }
4001
4002 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4003 bp->spq_prod_idx);
4004
4005 spin_unlock(&bp->spq_lock);
4006 return 0;
4007}
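/* Usage sketch (mirrors the statistics query posted later in this file):
 * a caller issues a ramrod on connection 0, with no extra data, and
 * checks the return code:
 *
 *	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0))
 *		BNX2X_ERR("failed to post statistics ramrod\n");
 *
 * The completion arrives on the fastpath ring, as noted above.
 */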
4008
4009/* acquire split MCP access lock register */
4010static int bnx2x_lock_alr(struct bnx2x *bp)
4011{
4012 int rc = 0;
4013 u32 i, j, val;
4014
4015 might_sleep();
4016 i = 100;
4017 for (j = 0; j < i*10; j++) {
4018 val = (1UL << 31);
4019 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4020 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4021 if (val & (1L << 31))
4022 break;
4023
4024 msleep(5);
4025 }
4026
4027 if (!(val & (1L << 31))) {
4028 BNX2X_ERR("Cannot acquire MCP access lock register\n");
4029
4030 rc = -EBUSY;
4031 }
4032
4033 return rc;
4034}
4035
4036/* Release split MCP access lock register */
4037static void bnx2x_unlock_alr(struct bnx2x *bp)
4038{
4039 u32 val = 0;
4040
4041 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4042}
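/* Note (inferred from the code above, not from vendor documentation):
 * the ALR is a single hardware semaphore at GRCBASE_MCP + 0x9c.
 * Writing bit 31 requests the lock, reading bit 31 back as set means
 * we own it, and writing 0 releases it.  bnx2x_lock_alr() polls for up
 * to 100 * 10 * 5ms = 5 seconds before giving up with -EBUSY.
 */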
4043
4044static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4045{
4046 struct host_def_status_block *def_sb = bp->def_status_blk;
4047 u16 rc = 0;
4048
4049 barrier(); /* status block is written to by the chip */
4050
4051 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4052 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4053 rc |= 1;
4054 }
4055 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4056 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4057 rc |= 2;
4058 }
4059 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4060 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4061 rc |= 4;
4062 }
4063 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4064 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4065 rc |= 8;
4066 }
4067 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4068 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4069 rc |= 16;
4070 }
4071 return rc;
4072}
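/* The return value is a bitmask of which default status block indices
 * changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task() below tests bit 0
 * (0x1) for HW attentions and bit 1 (0x2) for CStorm events.
 */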
4073
4074/*
4075 * slow path service functions
4076 */
4077
4078static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4079{
4080 int port = bp->port;
4081 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4082 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4083 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4084 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4085 NIG_REG_MASK_INTERRUPT_PORT0;
4086
4087 if (~bp->aeu_mask & (asserted & 0xff))
4088 BNX2X_ERR("IGU ERROR\n");
4089 if (bp->attn_state & asserted)
4090 BNX2X_ERR("IGU ERROR\n");
4091
4092 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4093 bp->aeu_mask, asserted);
4094 bp->aeu_mask &= ~(asserted & 0xff);
4095 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4096
4097 REG_WR(bp, aeu_addr, bp->aeu_mask);
4098
4099 bp->attn_state |= asserted;
4100
4101 if (asserted & ATTN_HARD_WIRED_MASK) {
4102 if (asserted & ATTN_NIG_FOR_FUNC) {
4103 u32 nig_status_port;
4104 u32 nig_int_addr = port ?
4105 NIG_REG_STATUS_INTERRUPT_PORT1 :
4106 NIG_REG_STATUS_INTERRUPT_PORT0;
4107
4108 bp->nig_mask = REG_RD(bp, nig_mask_addr);
4109 REG_WR(bp, nig_mask_addr, 0);
4110
4111 nig_status_port = REG_RD(bp, nig_int_addr);
4112 bnx2x_link_update(bp);
4113
4114 /* handle unicore attn? */
4115 }
4116 if (asserted & ATTN_SW_TIMER_4_FUNC)
4117 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4118
4119 if (asserted & GPIO_2_FUNC)
4120 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4121
4122 if (asserted & GPIO_3_FUNC)
4123 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4124
4125 if (asserted & GPIO_4_FUNC)
4126 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4127
4128 if (port == 0) {
4129 if (asserted & ATTN_GENERAL_ATTN_1) {
4130 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4132 }
4133 if (asserted & ATTN_GENERAL_ATTN_2) {
4134 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4136 }
4137 if (asserted & ATTN_GENERAL_ATTN_3) {
4138 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4140 }
4141 } else {
4142 if (asserted & ATTN_GENERAL_ATTN_4) {
4143 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4144 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4145 }
4146 if (asserted & ATTN_GENERAL_ATTN_5) {
4147 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4148 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4149 }
4150 if (asserted & ATTN_GENERAL_ATTN_6) {
4151 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4152 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4153 }
4154 }
4155
4156 } /* if hardwired */
4157
4158 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4159 asserted, BAR_IGU_INTMEM + igu_addr);
4160 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4161
4162 /* now set back the mask */
4163 if (asserted & ATTN_NIG_FOR_FUNC)
4164 REG_WR(bp, nig_mask_addr, bp->nig_mask);
4165}
4166
4167static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4168{
4169 int port = bp->port;
4170 int index;
4171 struct attn_route attn;
4172 struct attn_route group_mask;
4173 u32 reg_addr;
4174 u32 val;
4175
4176 /* need to take HW lock because MCP or other port might also
4177 try to handle this event */
4178 bnx2x_lock_alr(bp);
4179
4180 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4181 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4182 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4183 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4184 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4185
4186 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4187 if (deasserted & (1 << index)) {
4188 group_mask = bp->attn_group[index];
4189
4190 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4191 (unsigned long long)group_mask.sig[0]);
4192
4193 if (attn.sig[3] & group_mask.sig[3] &
4194 EVEREST_GEN_ATTN_IN_USE_MASK) {
4195
4196 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4197
4198 BNX2X_ERR("MC assert!\n");
4199 bnx2x_panic();
4200
4201 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4202
4203 BNX2X_ERR("MCP assert!\n");
4204 REG_WR(bp,
4205 MISC_REG_AEU_GENERAL_ATTN_11, 0);
4206 bnx2x_mc_assert(bp);
4207
4208 } else {
4209 BNX2X_ERR("UNKNOWN HW ASSERT!\n");
4210 }
4211 }
4212
4213 if (attn.sig[1] & group_mask.sig[1] &
4214 BNX2X_DOORQ_ASSERT) {
4215
4216 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4217 BNX2X_ERR("DB hw attention 0x%x\n", val);
4218 /* DORQ discard attention */
4219 if (val & 0x2)
4220 BNX2X_ERR("FATAL error from DORQ\n");
4221 }
4222
4223 if (attn.sig[2] & group_mask.sig[2] &
4224 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4225
4226 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4227 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4228 /* CFC error attention */
4229 if (val & 0x2)
4230 BNX2X_ERR("FATAL error from CFC\n");
4231 }
4232
4233 if (attn.sig[2] & group_mask.sig[2] &
4234 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4235
4236 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4237 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4238 /* RQ_USDMDP_FIFO_OVERFLOW */
4239 if (val & 0x18000)
4240 BNX2X_ERR("FATAL error from PXP\n");
4241 }
4242
4243 if (attn.sig[3] & group_mask.sig[3] &
4244 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4245
4246 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4247 0x7ff);
4248 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4249 attn.sig[3]);
4250 }
4251
4252 if ((attn.sig[0] & group_mask.sig[0] &
4253 HW_INTERRUT_ASSERT_SET_0) ||
4254 (attn.sig[1] & group_mask.sig[1] &
4255 HW_INTERRUT_ASSERT_SET_1) ||
4256 (attn.sig[2] & group_mask.sig[2] &
4257 HW_INTERRUT_ASSERT_SET_2))
4258 BNX2X_ERR("FATAL HW block attention\n");
4259
4260 if ((attn.sig[0] & group_mask.sig[0] &
4261 HW_PRTY_ASSERT_SET_0) ||
4262 (attn.sig[1] & group_mask.sig[1] &
4263 HW_PRTY_ASSERT_SET_1) ||
4264 (attn.sig[2] & group_mask.sig[2] &
4265 HW_PRTY_ASSERT_SET_2))
4266 BNX2X_ERR("FATAL HW block parity attention\n");
4267 }
4268 }
4269
4270 bnx2x_unlock_alr(bp);
4271
4272 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4273
4274 val = ~deasserted;
4275/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4276 val, BAR_IGU_INTMEM + reg_addr); */
4277 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4278
4279 if (bp->aeu_mask & (deasserted & 0xff))
4280 BNX2X_ERR("IGU BUG\n");
4281 if (~bp->attn_state & deasserted)
4282 BNX2X_ERR("IGU BUG\n");
4283
4284 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4285 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4286
4287 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4288 bp->aeu_mask |= (deasserted & 0xff);
4289
4290 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4291 REG_WR(bp, reg_addr, bp->aeu_mask);
4292
4293 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4294 bp->attn_state &= ~deasserted;
4295 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4296}
4297
4298static void bnx2x_attn_int(struct bnx2x *bp)
4299{
4300 /* read local copy of bits */
4301 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4302 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4303 u32 attn_state = bp->attn_state;
4304
4305 /* look for changed bits */
4306 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4307 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4308
4309 DP(NETIF_MSG_HW,
4310 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4311 attn_bits, attn_ack, asserted, deasserted);
4312
4313 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4314 BNX2X_ERR("bad attention state\n");
4315
4316 /* handle bits that were raised */
4317 if (asserted)
4318 bnx2x_attn_int_asserted(bp, asserted);
4319
4320 if (deasserted)
4321 bnx2x_attn_int_deasserted(bp, deasserted);
4322}
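/* Worked example (illustrative values): with attn_bits = 0x5,
 * attn_ack = 0x1 and attn_state = 0x1,
 *
 *	asserted   =  0x5 & ~0x1 & ~0x1 = 0x4	(bit 2 newly raised)
 *	deasserted = ~0x5 &  0x1 &  0x1 = 0x0	(nothing cleared)
 *
 * so only bnx2x_attn_int_asserted() runs, for bit 2.
 */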
4323
4324static void bnx2x_sp_task(struct work_struct *work)
4325{
4326 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4327 u16 status;
4328
4329 /* Return here if interrupt is disabled */
4330 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4331 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4332 return;
4333 }
4334
4335 status = bnx2x_update_dsb_idx(bp);
4336 if (status == 0)
4337 BNX2X_ERR("spurious slowpath interrupt!\n");
4338
4339 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4340
4341 if (status & 0x1) {
4342 /* HW attentions */
4343 bnx2x_attn_int(bp);
4344 }
4345
4346 /* CStorm events: query_stats, cfc delete ramrods */
4347 if (status & 0x2)
4348 bp->stat_pending = 0;
4349
4350 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4351 IGU_INT_NOP, 1);
4352 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4353 IGU_INT_NOP, 1);
4354 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4355 IGU_INT_NOP, 1);
4356 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4357 IGU_INT_NOP, 1);
4358 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4359 IGU_INT_ENABLE, 1);
4360}
4361
4362static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4363{
4364 struct net_device *dev = dev_instance;
4365 struct bnx2x *bp = netdev_priv(dev);
4366
4367 /* Return here if interrupt is disabled */
4368 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4369 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4370 return IRQ_HANDLED;
4371 }
4372
4373 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4374
4375#ifdef BNX2X_STOP_ON_ERROR
4376 if (unlikely(bp->panic))
4377 return IRQ_HANDLED;
4378#endif
4379
4380 schedule_work(&bp->sp_task);
4381
4382 return IRQ_HANDLED;
4383}
4384
4385/* end of slow path */
4386
4387/* Statistics */
4388
4389/****************************************************************************
4390* Macros
4391****************************************************************************/
4392
4393#define UPDATE_STAT(s, t) \
4394 do { \
4395 estats->t += new->s - old->s; \
4396 old->s = new->s; \
4397 } while (0)
4398
4399/* sum[hi:lo] += add[hi:lo] */
4400#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
4401 do { \
4402 s_lo += a_lo; \
4403 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
4404 } while (0)
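/* Worked example of the carry handling (illustrative values): adding
 * 0x00000000:0x00000001 to the sum 0x00000001:0xffffffff gives
 *
 *	s_lo = 0xffffffff + 0x1 = 0x0	(wraps, so s_lo < a_lo)
 *	s_hi = 0x1 + 0x0 + 1 = 0x2	(carry propagated)
 *
 * i.e. 0x00000002:0x00000000, as expected for 64-bit addition done in
 * two 32-bit halves.  Note the parentheses around the conditional are
 * required: without them the '+' binds tighter than '?:'.
 */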
4405
4406/* difference = minuend - subtrahend */
4407#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
4408 do { \
4409 if (m_lo < s_lo) { /* underflow */ \
4410 d_hi = m_hi - s_hi; \
4411 if (d_hi > 0) { /* we can 'loan' 1 */ \
4412 d_hi--; \
4413 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
4414 } else { /* m_hi <= s_hi */ \
4415 d_hi = 0; \
4416 d_lo = 0; \
4417 } \
4418 } else { /* m_lo >= s_lo */ \
4419 if (m_hi < s_hi) { \
4420 d_hi = 0; \
4421 d_lo = 0; \
4422 } else { /* m_hi >= s_hi */ \
4423 d_hi = m_hi - s_hi; \
4424 d_lo = m_lo - s_lo; \
4425 } \
4426 } \
4427 } while (0)
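/* Worked example of the borrow ("loan") path (illustrative values):
 * 0x00000002:0x00000000 - 0x00000001:0x00000001 takes the underflow
 * branch since m_lo (0x0) < s_lo (0x1); d_hi = 0x2 - 0x1 = 0x1 > 0,
 * so we loan 1:
 *
 *	d_hi = 0x0
 *	d_lo = 0x0 + (0xffffffff - 0x1) + 1 = 0xffffffff
 *
 * giving 0x00000000:0xffffffff.  If the subtrahend exceeds the minuend
 * the result is clamped to zero instead of wrapping.
 */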
4428
4429/* minuend -= subtrahend */
4430#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
4431 do { \
4432 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
4433 } while (0)
4434
4435#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
4436 do { \
4437 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
4438 diff.lo, new->s_lo, old->s_lo); \
4439 old->s_hi = new->s_hi; \
4440 old->s_lo = new->s_lo; \
4441 ADD_64(estats->t_hi, diff.hi, \
4442 estats->t_lo, diff.lo); \
4443 } while (0)
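/* UPDATE_STAT64 thus computes the 64-bit delta since the previous
 * snapshot (DIFF_64), remembers the new raw hardware value in 'old'
 * and accumulates the delta into the driver totals (ADD_64) - the
 * usual pattern for free-running hardware counters.
 */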
4444
4445/* sum[hi:lo] += add */
4446#define ADD_EXTEND_64(s_hi, s_lo, a) \
4447 do { \
4448 s_lo += a; \
4449 s_hi += (s_lo < a) ? 1 : 0; \
4450 } while (0)
4451
4452#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
4453 do { \
4454 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
4455 } while (0)
4456
4457#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
4458 do { \
4459 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
4460 old_tclient->s = le32_to_cpu(tclient->s); \
4461 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
4462 } while (0)
4463
4464/*
4465 * General service functions
4466 */
4467
4468static inline long bnx2x_hilo(u32 *hiref)
4469{
4470 u32 lo = *(hiref + 1);
4471#if (BITS_PER_LONG == 64)
4472 u32 hi = *hiref;
4473
4474 return HILO_U64(hi, lo);
4475#else
4476 return lo;
4477#endif
4478}
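/* Note: on 32-bit kernels only the low 32 bits are returned, matching
 * the width of the unsigned long counters in struct net_device_stats
 * that the callers below assign the result to.
 */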
4479
4480/*
4481 * Init service functions
4482 */
4483
4484static void bnx2x_init_mac_stats(struct bnx2x *bp)
4485{
4486 struct dmae_command *dmae;
4487 int port = bp->port;
4488 int loader_idx = port * 8;
4489 u32 opcode;
4490 u32 mac_addr;
4491
4492 bp->executer_idx = 0;
4493 if (bp->fw_mb) {
4494 /* MCP */
4495 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4496 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4497#ifdef __BIG_ENDIAN
4498 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4499#else
4500 DMAE_CMD_ENDIANITY_DW_SWAP |
4501#endif
4502 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4503
4504 if (bp->link_up)
4505 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4506
4507 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4508 dmae->opcode = opcode;
4509 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4510 sizeof(u32));
4511 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4512 sizeof(u32));
4513 dmae->dst_addr_lo = bp->fw_mb >> 2;
4514 dmae->dst_addr_hi = 0;
4515 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4516 sizeof(u32)) >> 2;
4517 if (bp->link_up) {
4518 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4519 dmae->comp_addr_hi = 0;
4520 dmae->comp_val = 1;
4521 } else {
4522 dmae->comp_addr_lo = 0;
4523 dmae->comp_addr_hi = 0;
4524 dmae->comp_val = 0;
4525 }
4526 }
4527
4528 if (!bp->link_up) {
4529 /* no need to collect statistics while the link is down */
4530 return;
4531 }
4532
4533 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4534 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4535 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4536#ifdef __BIG_ENDIAN
4537 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4538#else
4539 DMAE_CMD_ENDIANITY_DW_SWAP |
4540#endif
4541 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4542
4543 if (bp->phy_flags & PHY_BMAC_FLAG) {
4544
4545 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4546 NIG_REG_INGRESS_BMAC0_MEM);
4547
4548 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4549 BIGMAC_REGISTER_TX_STAT_GTBYT */
4550 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4551 dmae->opcode = opcode;
4552 dmae->src_addr_lo = (mac_addr +
4553 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4554 dmae->src_addr_hi = 0;
4555 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4556 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4557 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4558 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4559 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4560 dmae->comp_addr_hi = 0;
4561 dmae->comp_val = 1;
4562
4563 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4564 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4565 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4566 dmae->opcode = opcode;
4567 dmae->src_addr_lo = (mac_addr +
4568 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4569 dmae->src_addr_hi = 0;
4570 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4571 offsetof(struct bmac_stats, rx_gr64));
4572 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4573 offsetof(struct bmac_stats, rx_gr64));
4574 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4575 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4576 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4577 dmae->comp_addr_hi = 0;
4578 dmae->comp_val = 1;
4579
4580 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4581
4582 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4583
4584 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4586 dmae->opcode = opcode;
4587 dmae->src_addr_lo = (mac_addr +
4588 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4589 dmae->src_addr_hi = 0;
4590 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4591 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4592 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4593 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4594 dmae->comp_addr_hi = 0;
4595 dmae->comp_val = 1;
4596
4597 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4598 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4599 dmae->opcode = opcode;
4600 dmae->src_addr_lo = (mac_addr +
4601 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4602 dmae->src_addr_hi = 0;
4603 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4604 offsetof(struct emac_stats,
4605 rx_falsecarriererrors));
4606 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4607 offsetof(struct emac_stats,
4608 rx_falsecarriererrors));
4609 dmae->len = 1;
4610 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4611 dmae->comp_addr_hi = 0;
4612 dmae->comp_val = 1;
4613
4614 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4615 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4616 dmae->opcode = opcode;
4617 dmae->src_addr_lo = (mac_addr +
4618 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4619 dmae->src_addr_hi = 0;
4620 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4621 offsetof(struct emac_stats,
4622 tx_ifhcoutoctets));
4623 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4624 offsetof(struct emac_stats,
4625 tx_ifhcoutoctets));
4626 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4628 dmae->comp_addr_hi = 0;
4629 dmae->comp_val = 1;
4630 }
4631
4632 /* NIG */
4633 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4634 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4635 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4636 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4637#ifdef __BIG_ENDIAN
4638 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4639#else
4640 DMAE_CMD_ENDIANITY_DW_SWAP |
4641#endif
4642 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4643 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4644 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4645 dmae->src_addr_hi = 0;
4646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4648 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4649 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4650 offsetof(struct nig_stats, done));
4651 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4652 offsetof(struct nig_stats, done));
4653 dmae->comp_val = 0xffffffff;
4654}
4655
4656static void bnx2x_init_stats(struct bnx2x *bp)
4657{
4658 int port = bp->port;
4659
4660 bp->stats_state = STATS_STATE_DISABLE;
4661 bp->executer_idx = 0;
4662
4663 bp->old_brb_discard = REG_RD(bp,
4664 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4665
4666 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4667 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4668 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4669
4670 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4671 REG_WR(bp, BAR_XSTRORM_INTMEM +
4672 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4673
4674 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4675 REG_WR(bp, BAR_TSTRORM_INTMEM +
4676 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4677
4678 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4679 REG_WR(bp, BAR_CSTRORM_INTMEM +
4680 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4681
4682 REG_WR(bp, BAR_XSTRORM_INTMEM +
4683 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4684 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4685 REG_WR(bp, BAR_XSTRORM_INTMEM +
4686 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4687 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4688
4689 REG_WR(bp, BAR_TSTRORM_INTMEM +
4690 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4691 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4692 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4694 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4695}
4696
4697static void bnx2x_stop_stats(struct bnx2x *bp)
4698{
4699 might_sleep();
4700 if (bp->stats_state != STATS_STATE_DISABLE) {
4701 int timeout = 10;
4702
4703 bp->stats_state = STATS_STATE_STOP;
4704 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4705
4706 while (bp->stats_state != STATS_STATE_DISABLE) {
4707 if (!timeout) {
4708 BNX2X_ERR("timeout waiting for stats stop\n");
4709 break;
4710 }
4711 timeout--;
4712 msleep(100);
4713 }
4714 }
4715 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4716}
4717
4718/*
4719 * Statistics service functions
4720 */
4721
4722static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4723{
4724 struct regp diff;
4725 struct regp sum;
4726 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4727 struct bmac_stats *old = &bp->old_bmac;
4728 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4729
4730 sum.hi = 0;
4731 sum.lo = 0;
4732
4733 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4734 tx_gtbyt.lo, total_bytes_transmitted_lo);
4735
4736 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4737 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4738 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4739
4740 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4741 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4742 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4743
4744 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4745 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4746 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4747 estats->total_unicast_packets_transmitted_lo, sum.lo);
4748
4749 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4750 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4751 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4752 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4753 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4754 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4755 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4756 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4757 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4758 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4759 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4760
4761 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4762 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4763 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4764 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4765 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4766 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4767 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4768 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4769
4770 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4771 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4772 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4773 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4774 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4775 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4776 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4777}
4778
4779static void bnx2x_update_emac_stats(struct bnx2x *bp)
4780{
4781 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4782 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4783
4784 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4785 total_bytes_transmitted_lo);
4786 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4787 total_unicast_packets_transmitted_hi,
4788 total_unicast_packets_transmitted_lo);
4789 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4790 total_multicast_packets_transmitted_hi,
4791 total_multicast_packets_transmitted_lo);
4792 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4793 total_broadcast_packets_transmitted_hi,
4794 total_broadcast_packets_transmitted_lo);
4795
4796 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4797 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4798 estats->single_collision_transmit_frames +=
4799 new->tx_dot3statssinglecollisionframes;
4800 estats->multiple_collision_transmit_frames +=
4801 new->tx_dot3statsmultiplecollisionframes;
4802 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4803 estats->excessive_collision_frames +=
4804 new->tx_dot3statsexcessivecollisions;
4805 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4806 estats->frames_transmitted_65_127_bytes +=
4807 new->tx_etherstatspkts65octetsto127octets;
4808 estats->frames_transmitted_128_255_bytes +=
4809 new->tx_etherstatspkts128octetsto255octets;
4810 estats->frames_transmitted_256_511_bytes +=
4811 new->tx_etherstatspkts256octetsto511octets;
4812 estats->frames_transmitted_512_1023_bytes +=
4813 new->tx_etherstatspkts512octetsto1023octets;
4814 estats->frames_transmitted_1024_1522_bytes +=
4815 new->tx_etherstatspkts1024octetsto1522octet;
4816 estats->frames_transmitted_1523_9022_bytes +=
4817 new->tx_etherstatspktsover1522octets;
4818
4819 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4820 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4821 estats->false_carrier_detections += new->rx_falsecarriererrors;
4822 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4823 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4824 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4825 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4826 estats->control_frames_received += new->rx_maccontrolframesreceived;
4827 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4828 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4829
4830 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4831 stat_IfHCInBadOctets_lo);
4832 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4833 stat_IfHCOutBadOctets_lo);
4834 estats->stat_Dot3statsInternalMacTransmitErrors +=
4835 new->tx_dot3statsinternalmactransmiterrors;
4836 estats->stat_Dot3StatsCarrierSenseErrors +=
4837 new->rx_dot3statscarriersenseerrors;
4838 estats->stat_Dot3StatsDeferredTransmissions +=
4839 new->tx_dot3statsdeferredtransmissions;
4840 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4841 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4842}
4843
4844static int bnx2x_update_storm_stats(struct bnx2x *bp)
4845{
4846 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4847 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4848 struct tstorm_per_client_stats *tclient =
4849 &tstats->client_statistics[0];
4850 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4851 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4852 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4853 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4854 u32 diff;
4855
4856 /* are DMAE stats valid? */
4857 if (nstats->done != 0xffffffff) {
4858 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4859 return -1;
4860 }
4861
4862 /* are storm stats valid? */
4863 if (tstats->done.hi != 0xffffffff) {
4864 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4865 return -2;
4866 }
4867 if (xstats->done.hi != 0xffffffff) {
4868 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4869 return -3;
4870 }
4871
4872 estats->total_bytes_received_hi =
4873 estats->valid_bytes_received_hi =
4874 le32_to_cpu(tclient->total_rcv_bytes.hi);
4875 estats->total_bytes_received_lo =
4876 estats->valid_bytes_received_lo =
4877 le32_to_cpu(tclient->total_rcv_bytes.lo);
4878 ADD_64(estats->total_bytes_received_hi,
4879 le32_to_cpu(tclient->rcv_error_bytes.hi),
4880 estats->total_bytes_received_lo,
4881 le32_to_cpu(tclient->rcv_error_bytes.lo));
4882
4883 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4884 total_unicast_packets_received_hi,
4885 total_unicast_packets_received_lo);
4886 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4887 total_multicast_packets_received_hi,
4888 total_multicast_packets_received_lo);
4889 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4890 total_broadcast_packets_received_hi,
4891 total_broadcast_packets_received_lo);
4892
4893 estats->frames_received_64_bytes = MAC_STX_NA;
4894 estats->frames_received_65_127_bytes = MAC_STX_NA;
4895 estats->frames_received_128_255_bytes = MAC_STX_NA;
4896 estats->frames_received_256_511_bytes = MAC_STX_NA;
4897 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4898 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4899 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4900
4901 estats->x_total_sent_bytes_hi =
4902 le32_to_cpu(xstats->total_sent_bytes.hi);
4903 estats->x_total_sent_bytes_lo =
4904 le32_to_cpu(xstats->total_sent_bytes.lo);
4905 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4906
4907 estats->t_rcv_unicast_bytes_hi =
4908 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4909 estats->t_rcv_unicast_bytes_lo =
4910 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4911 estats->t_rcv_broadcast_bytes_hi =
4912 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4913 estats->t_rcv_broadcast_bytes_lo =
4914 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4915 estats->t_rcv_multicast_bytes_hi =
4916 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4917 estats->t_rcv_multicast_bytes_lo =
4918 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4919 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4920
4921 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4922 estats->packets_too_big_discard =
4923 le32_to_cpu(tclient->packets_too_big_discard);
4924 estats->jabber_packets_received = estats->packets_too_big_discard +
4925 estats->stat_Dot3statsFramesTooLong;
4926 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4927 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4928 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4929 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4930 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4931 estats->brb_truncate_discard =
4932 le32_to_cpu(tstats->brb_truncate_discard);
4933
4934 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4935 bp->old_brb_discard = nstats->brb_discard;
4936
4937 estats->brb_packet = nstats->brb_packet;
4938 estats->brb_truncate = nstats->brb_truncate;
4939 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4940 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4941 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4942 estats->mng_discard = nstats->mng_discard;
4943 estats->mng_octet_inp = nstats->mng_octet_inp;
4944 estats->mng_octet_out = nstats->mng_octet_out;
4945 estats->mng_packet_inp = nstats->mng_packet_inp;
4946 estats->mng_packet_out = nstats->mng_packet_out;
4947 estats->pbf_octets = nstats->pbf_octets;
4948 estats->pbf_packet = nstats->pbf_packet;
4949 estats->safc_inp = nstats->safc_inp;
4950
4951 xstats->done.hi = 0;
4952 tstats->done.hi = 0;
4953 nstats->done = 0;
4954
4955 return 0;
4956}
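/* The 'done' markers implement the handshake with the collectors: the
 * NIG DMAE transfer set up in bnx2x_init_mac_stats() completes by
 * writing comp_val = 0xffffffff into nig_stats.done, and the storms
 * flag their blocks the same way.  Clearing the markers here re-arms
 * the validity checks for the next statistics cycle.
 */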
4957
4958static void bnx2x_update_net_stats(struct bnx2x *bp)
4959{
4960 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4961 struct net_device_stats *nstats = &bp->dev->stats;
4962
4963 nstats->rx_packets =
4964 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4965 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4966 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4967
4968 nstats->tx_packets =
4969 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4970 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4971 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4972
4973 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4974
4975 nstats->tx_bytes =
4976 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4977
4978 nstats->rx_dropped = estats->checksum_discard +
4979 estats->mac_discard;
4980 nstats->tx_dropped = 0;
4981
4982 nstats->multicast =
4983 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4984
4985 nstats->collisions =
4986 estats->single_collision_transmit_frames +
4987 estats->multiple_collision_transmit_frames +
4988 estats->late_collision_frames +
4989 estats->excessive_collision_frames;
4990
4991 nstats->rx_length_errors = estats->runt_packets_received +
4992 estats->jabber_packets_received;
4993 nstats->rx_over_errors = estats->no_buff_discard;
4994 nstats->rx_crc_errors = estats->crc_receive_errors;
4995 nstats->rx_frame_errors = estats->alignment_errors;
4996 nstats->rx_fifo_errors = estats->brb_discard +
4997 estats->brb_truncate_discard;
4998 nstats->rx_missed_errors = estats->xxoverflow_discard;
4999
5000 nstats->rx_errors = nstats->rx_length_errors +
5001 nstats->rx_over_errors +
5002 nstats->rx_crc_errors +
5003 nstats->rx_frame_errors +
5004 nstats->rx_fifo_errors;
5005
5006 nstats->tx_aborted_errors = estats->late_collision_frames +
5007 estats->excessive_collision_frames;
5008 nstats->tx_carrier_errors = estats->false_carrier_detections;
5009 nstats->tx_fifo_errors = 0;
5010 nstats->tx_heartbeat_errors = 0;
5011 nstats->tx_window_errors = 0;
5012
5013 nstats->tx_errors = nstats->tx_aborted_errors +
5014 nstats->tx_carrier_errors;
5015
5016 estats->mac_stx_start = ++estats->mac_stx_end;
5017}
5018
5019static void bnx2x_update_stats(struct bnx2x *bp)
5020{
5021 int i;
5022
5023 if (!bnx2x_update_storm_stats(bp)) {
5024
5025 if (bp->phy_flags & PHY_BMAC_FLAG) {
5026 bnx2x_update_bmac_stats(bp);
5027
5028 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5029 bnx2x_update_emac_stats(bp);
5030
5031 } else { /* unreached */
5032 BNX2X_ERR("no MAC active\n");
5033 return;
5034 }
5035
5036 bnx2x_update_net_stats(bp);
5037 }
5038
5039 if (bp->msglevel & NETIF_MSG_TIMER) {
5040 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5041 struct net_device_stats *nstats = &bp->dev->stats;
5042
5043 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5044 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5045 " tx pkt (%lx)\n",
5046 bnx2x_tx_avail(bp->fp),
5047 *bp->fp->tx_cons_sb, nstats->tx_packets);
5048 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5049 " rx pkt (%lx)\n",
5050 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5051 *bp->fp->rx_cons_sb, nstats->rx_packets);
5052 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5053 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5054 estats->driver_xoff, estats->brb_discard);
5055 printk(KERN_DEBUG "tstats: checksum_discard %u "
5056 "packets_too_big_discard %u no_buff_discard %u "
5057 "mac_discard %u mac_filter_discard %u "
5058 "xxovrflow_discard %u brb_truncate_discard %u "
5059 "ttl0_discard %u\n",
5060 estats->checksum_discard,
5061 estats->packets_too_big_discard,
5062 estats->no_buff_discard, estats->mac_discard,
5063 estats->mac_filter_discard, estats->xxoverflow_discard,
5064 estats->brb_truncate_discard, estats->ttl0_discard);
5065
5066 for_each_queue(bp, i) {
5067 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5068 bnx2x_fp(bp, i, tx_pkt),
5069 bnx2x_fp(bp, i, rx_pkt),
5070 bnx2x_fp(bp, i, rx_calls));
5071 }
5072 }
5073
5074 if (bp->state != BNX2X_STATE_OPEN) {
5075 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5076 return;
5077 }
5078
5079#ifdef BNX2X_STOP_ON_ERROR
5080 if (unlikely(bp->panic))
5081 return;
5082#endif
5083
5084 /* loader */
5085 if (bp->executer_idx) {
5086 struct dmae_command *dmae = &bp->dmae;
5087 int port = bp->port;
5088 int loader_idx = port * 8;
5089
5090 memset(dmae, 0, sizeof(struct dmae_command));
5091
5092 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5093 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5094 DMAE_CMD_DST_RESET |
5095#ifdef __BIG_ENDIAN
5096 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5097#else
5098 DMAE_CMD_ENDIANITY_DW_SWAP |
5099#endif
5100 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5101 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5102 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5103 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5104 sizeof(struct dmae_command) *
5105 (loader_idx + 1)) >> 2;
5106 dmae->dst_addr_hi = 0;
5107 dmae->len = sizeof(struct dmae_command) >> 2;
5108 dmae->len--; /* !!! for A0/1 only */
5109 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5110 dmae->comp_addr_hi = 0;
5111 dmae->comp_val = 1;
5112
5113 bnx2x_post_dmae(bp, dmae, loader_idx);
5114 }
5115
5116 if (bp->stats_state != STATS_STATE_ENABLE) {
5117 bp->stats_state = STATS_STATE_DISABLE;
5118 return;
5119 }
5120
5121 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5122 /* stats ramrod has its own slot on the spe */
5123 bp->spq_left++;
5124 bp->stat_pending = 1;
5125 }
5126}
5127
5128static void bnx2x_timer(unsigned long data)
5129{
5130 struct bnx2x *bp = (struct bnx2x *) data;
5131
5132 if (!netif_running(bp->dev))
5133 return;
5134
5135 if (atomic_read(&bp->intr_sem) != 0)
5136 goto timer_restart;
5137
5138 if (poll) {
5139 struct bnx2x_fastpath *fp = &bp->fp[0];
5140 int rc;
5141
5142 bnx2x_tx_int(fp, 1000);
5143 rc = bnx2x_rx_int(fp, 1000);
5144 }
5145
5146 if (!nomcp) {
5147 int port = bp->port;
5148 u32 drv_pulse;
5149 u32 mcp_pulse;
5150
5151 ++bp->fw_drv_pulse_wr_seq;
5152 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5153 /* TBD - add SYSTEM_TIME */
5154 drv_pulse = bp->fw_drv_pulse_wr_seq;
5155 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5156
5157 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5158 MCP_PULSE_SEQ_MASK);
5159 /* The delta between driver pulse and mcp response
5160 * should be 1 (before mcp response) or 0 (after mcp response)
5161 */
5162 if ((drv_pulse != mcp_pulse) &&
5163 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5164 /* someone lost a heartbeat... */
5165 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5166 drv_pulse, mcp_pulse);
5167 }
5168 }
5169
5170 if (bp->stats_state == STATS_STATE_DISABLE)
5171 goto timer_restart;
5172
5173 bnx2x_update_stats(bp);
5174
5175timer_restart:
5176 mod_timer(&bp->timer, jiffies + bp->current_interval);
5177}
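/* Heartbeat example (illustrative numbers): if the driver has just
 * written drv_pulse = 0x23, a healthy MCP reports mcp_pulse = 0x23
 * (already responded) or 0x22 (response pending), i.e.
 * drv_pulse == mcp_pulse or drv_pulse == (mcp_pulse + 1) modulo
 * MCP_PULSE_SEQ_MASK.  Anything else means a lost heartbeat.
 */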
5178
5179/* end of Statistics */
5180
5181/* nic init */
5182
5183/*
5184 * nic init service functions
5185 */
5186
5187static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5188 dma_addr_t mapping, int id)
5189{
5190 int port = bp->port;
5191 u64 section;
5192 int index;
5193
5194 /* USTORM */
5195 section = ((u64)mapping) + offsetof(struct host_status_block,
5196 u_status_block);
5197 sb->u_status_block.status_block_id = id;
5198
5199 REG_WR(bp, BAR_USTRORM_INTMEM +
5200 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5201 REG_WR(bp, BAR_USTRORM_INTMEM +
5202 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5203 U64_HI(section));
5204
5205 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5206 REG_WR16(bp, BAR_USTRORM_INTMEM +
5207 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5208
5209 /* CSTORM */
5210 section = ((u64)mapping) + offsetof(struct host_status_block,
5211 c_status_block);
5212 sb->c_status_block.status_block_id = id;
5213
5214 REG_WR(bp, BAR_CSTRORM_INTMEM +
5215 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5216 REG_WR(bp, BAR_CSTRORM_INTMEM +
5217 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5218 U64_HI(section));
5219
5220 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5221 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5222 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5223
5224 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5225}
5226
5227static void bnx2x_init_def_sb(struct bnx2x *bp,
5228 struct host_def_status_block *def_sb,
5229 dma_addr_t mapping, int id)
5230{
5231 int port = bp->port;
5232 int index, val, reg_offset;
5233 u64 section;
5234
5235 /* ATTN */
5236 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5237 atten_status_block);
5238 def_sb->atten_status_block.status_block_id = id;
5239
5240 bp->def_att_idx = 0;
5241 bp->attn_state = 0;
5242
5243 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5245
5246 for (index = 0; index < 3; index++) {
5247 bp->attn_group[index].sig[0] = REG_RD(bp,
5248 reg_offset + 0x10*index);
5249 bp->attn_group[index].sig[1] = REG_RD(bp,
5250 reg_offset + 0x4 + 0x10*index);
5251 bp->attn_group[index].sig[2] = REG_RD(bp,
5252 reg_offset + 0x8 + 0x10*index);
5253 bp->attn_group[index].sig[3] = REG_RD(bp,
5254 reg_offset + 0xc + 0x10*index);
5255 }
5256
5257 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5258 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5259
5260 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5261 HC_REG_ATTN_MSG0_ADDR_L);
5262
5263 REG_WR(bp, reg_offset, U64_LO(section));
5264 REG_WR(bp, reg_offset + 4, U64_HI(section));
5265
5266 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5267
5268 val = REG_RD(bp, reg_offset);
5269 val |= id;
5270 REG_WR(bp, reg_offset, val);
5271
5272 /* USTORM */
5273 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5274 u_def_status_block);
5275 def_sb->u_def_status_block.status_block_id = id;
5276
5277 bp->def_u_idx = 0;
5278
5279 REG_WR(bp, BAR_USTRORM_INTMEM +
5280 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5281 REG_WR(bp, BAR_USTRORM_INTMEM +
5282 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5283 U64_HI(section));
5284 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5285 BNX2X_BTR);
5286
5287 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5288 REG_WR16(bp, BAR_USTRORM_INTMEM +
5289 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5290
5291 /* CSTORM */
5292 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5293 c_def_status_block);
5294 def_sb->c_def_status_block.status_block_id = id;
5295
5296 bp->def_c_idx = 0;
5297
5298 REG_WR(bp, BAR_CSTRORM_INTMEM +
5299 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5300 REG_WR(bp, BAR_CSTRORM_INTMEM +
5301 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5302 U64_HI(section));
5303 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5304 BNX2X_BTR);
5305
5306 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5307 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5308 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5309
5310 /* TSTORM */
5311 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5312 t_def_status_block);
5313 def_sb->t_def_status_block.status_block_id = id;
5314
5315 bp->def_t_idx = 0;
5316
5317 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5319 REG_WR(bp, BAR_TSTRORM_INTMEM +
5320 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5321 U64_HI(section));
5322 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5323 BNX2X_BTR);
5324
5325 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5326 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5327 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5328
5329 /* XSTORM */
5330 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5331 x_def_status_block);
5332 def_sb->x_def_status_block.status_block_id = id;
5333
5334 bp->def_x_idx = 0;
5335
5336 REG_WR(bp, BAR_XSTRORM_INTMEM +
5337 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5338 REG_WR(bp, BAR_XSTRORM_INTMEM +
5339 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5340 U64_HI(section));
5341 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5342 BNX2X_BTR);
5343
5344 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5345 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5346 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5347
5348 bp->stat_pending = 0;
5349
5350 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5351}
5352
5353static void bnx2x_update_coalesce(struct bnx2x *bp)
5354{
5355 int port = bp->port;
5356 int i;
5357
5358 for_each_queue(bp, i) {
5359
5360 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5361 REG_WR8(bp, BAR_USTRORM_INTMEM +
5362 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5363 HC_INDEX_U_ETH_RX_CQ_CONS),
5364 bp->rx_ticks_int/12);
5365 REG_WR16(bp, BAR_USTRORM_INTMEM +
5366 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5367 HC_INDEX_U_ETH_RX_CQ_CONS),
5368 bp->rx_ticks_int ? 0 : 1);
5369
5370 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5371 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5372 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5373 HC_INDEX_C_ETH_TX_CQ_CONS),
5374 bp->tx_ticks_int/12);
5375 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5376 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5377 HC_INDEX_C_ETH_TX_CQ_CONS),
5378 bp->tx_ticks_int ? 0 : 1);
5379 }
5380}
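/* The /12 scaling suggests the HC timeout fields count in units of
 * 12 microseconds while rx_ticks_int/tx_ticks_int are kept in plain
 * microseconds (an inference from this code, not from documentation).
 * A tick value of 0 disables coalescing on that index by writing 1 to
 * the corresponding HC_DISABLE field.
 */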
5381
5382static void bnx2x_init_rx_rings(struct bnx2x *bp)
5383{
5384 u16 ring_prod;
5385 int i, j;
5386 int port = bp->port;
5387
5388 bp->rx_buf_use_size = bp->dev->mtu;
5389
5390 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5391 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5392
5393 for_each_queue(bp, j) {
5394 struct bnx2x_fastpath *fp = &bp->fp[j];
5395
5396 fp->rx_bd_cons = 0;
5397 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5398
5399 for (i = 1; i <= NUM_RX_RINGS; i++) {
5400 struct eth_rx_bd *rx_bd;
5401
5402 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5403 rx_bd->addr_hi =
5404 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5405 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5406 rx_bd->addr_lo =
5407 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5408 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5409
5410 }
5411
5412 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5413 struct eth_rx_cqe_next_page *nextpg;
5414
5415 nextpg = (struct eth_rx_cqe_next_page *)
5416 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5417 nextpg->addr_hi =
5418 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5419 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5420 nextpg->addr_lo =
5421 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5422 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5423 }
5424
5425 /* rx completion queue */
5426 fp->rx_comp_cons = ring_prod = 0;
5427
5428 for (i = 0; i < bp->rx_ring_size; i++) {
5429 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5430 BNX2X_ERR("was only able to allocate "
5431 "%d rx skbs\n", i);
5432 break;
5433 }
5434 ring_prod = NEXT_RX_IDX(ring_prod);
5435 BUG_TRAP(ring_prod > i);
5436 }
5437
5438 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5439 fp->rx_pkt = fp->rx_calls = 0;
5440
5441 /* Warning! this will generate an interrupt (to the TSTORM) */
5442 /* must only be done when chip is initialized */
5443 REG_WR(bp, BAR_TSTRORM_INTMEM +
5444 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5445 if (j != 0)
5446 continue;
5447
5448 REG_WR(bp, BAR_USTRORM_INTMEM +
5449 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5450 U64_LO(fp->rx_comp_mapping));
5451 REG_WR(bp, BAR_USTRORM_INTMEM +
5452 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5453 U64_HI(fp->rx_comp_mapping));
5454 }
5455}
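/* Layout note (inferred from the indexing above): the descriptor entry
 * at RX_DESC_CNT * i - 2, near the end of each BD page, is reused as a
 * pointer to the DMA address of the following page, with the last page
 * pointing back at the first (i % NUM_RX_RINGS), so the pages form a
 * ring.  The completion queue is chained the same way through its
 * eth_rx_cqe_next_page entries.
 */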
5456
5457static void bnx2x_init_tx_ring(struct bnx2x *bp)
5458{
5459 int i, j;
5460
5461 for_each_queue(bp, j) {
5462 struct bnx2x_fastpath *fp = &bp->fp[j];
5463
5464 for (i = 1; i <= NUM_TX_RINGS; i++) {
5465 struct eth_tx_bd *tx_bd =
5466 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5467
5468 tx_bd->addr_hi =
5469 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5470 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5471 tx_bd->addr_lo =
5472 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5473 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5474 }
5475
5476 fp->tx_pkt_prod = 0;
5477 fp->tx_pkt_cons = 0;
5478 fp->tx_bd_prod = 0;
5479 fp->tx_bd_cons = 0;
5480 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5481 fp->tx_pkt = 0;
5482 }
5483}
5484
5485static void bnx2x_init_sp_ring(struct bnx2x *bp)
5486{
5487 int port = bp->port;
5488
5489 spin_lock_init(&bp->spq_lock);
5490
5491 bp->spq_left = MAX_SPQ_PENDING;
5492 bp->spq_prod_idx = 0;
5493 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5494 bp->spq_prod_bd = bp->spq;
5495 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5496
5497 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5498 U64_LO(bp->spq_mapping));
5499 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5500 U64_HI(bp->spq_mapping));
5501
5502 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5503 bp->spq_prod_idx);
5504}
5505
5506static void bnx2x_init_context(struct bnx2x *bp)
5507{
5508 int i;
5509
5510 for_each_queue(bp, i) {
5511 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5512 struct bnx2x_fastpath *fp = &bp->fp[i];
5513
5514 context->xstorm_st_context.tx_bd_page_base_hi =
5515 U64_HI(fp->tx_desc_mapping);
5516 context->xstorm_st_context.tx_bd_page_base_lo =
5517 U64_LO(fp->tx_desc_mapping);
5518 context->xstorm_st_context.db_data_addr_hi =
5519 U64_HI(fp->tx_prods_mapping);
5520 context->xstorm_st_context.db_data_addr_lo =
5521 U64_LO(fp->tx_prods_mapping);
5522
5523 context->ustorm_st_context.rx_bd_page_base_hi =
5524 U64_HI(fp->rx_desc_mapping);
5525 context->ustorm_st_context.rx_bd_page_base_lo =
5526 U64_LO(fp->rx_desc_mapping);
5527 context->ustorm_st_context.status_block_id = i;
5528 context->ustorm_st_context.sb_index_number =
5529 HC_INDEX_U_ETH_RX_CQ_CONS;
5530 context->ustorm_st_context.rcq_base_address_hi =
5531 U64_HI(fp->rx_comp_mapping);
5532 context->ustorm_st_context.rcq_base_address_lo =
5533 U64_LO(fp->rx_comp_mapping);
5534 context->ustorm_st_context.flags =
5535 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5536 context->ustorm_st_context.mc_alignment_size = 64;
5537 context->ustorm_st_context.num_rss = bp->num_queues;
5538
5539 context->cstorm_st_context.sb_index_number =
5540 HC_INDEX_C_ETH_TX_CQ_CONS;
5541 context->cstorm_st_context.status_block_id = i;
5542
5543 context->xstorm_ag_context.cdu_reserved =
5544 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5545 CDU_REGION_NUMBER_XCM_AG,
5546 ETH_CONNECTION_TYPE);
5547 context->ustorm_ag_context.cdu_usage =
5548 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5549 CDU_REGION_NUMBER_UCM_AG,
5550 ETH_CONNECTION_TYPE);
5551 }
5552}
5553
5554static void bnx2x_init_ind_table(struct bnx2x *bp)
5555{
5556 int port = bp->port;
5557 int i;
5558
5559 if (!is_multi(bp))
5560 return;
5561
5562 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5563 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5564 i % bp->num_queues);
5565
5566 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5567}
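/* Sketch of the indirection table written above for a hypothetical
 * bp->num_queues == 4 (entries are assigned round-robin):
 *
 *	table entry:  0 1 2 3 4 5 6 7 ...
 *	queue:        0 1 2 3 0 1 2 3 ...
 *
 * so RSS hash results are spread evenly across the Rx queues.
 */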
5568
5569static void bnx2x_set_client_config(struct bnx2x *bp)
5570{
5571#ifdef BCM_VLAN
5572 int mode = bp->rx_mode;
5573#endif
5574 int i, port = bp->port;
5575 struct tstorm_eth_client_config tstorm_client = {0};
5576
5577 tstorm_client.mtu = bp->dev->mtu;
5578 tstorm_client.statistics_counter_id = 0;
5579 tstorm_client.config_flags =
5580 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5581#ifdef BCM_VLAN
5582 if (mode && bp->vlgrp) {
5583 tstorm_client.config_flags |=
5584 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5585 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5586 }
5587#endif
5588 if (mode != BNX2X_RX_MODE_PROMISC)
5589 tstorm_client.drop_flags =
5590 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5591
5592 for_each_queue(bp, i) {
5593 REG_WR(bp, BAR_TSTRORM_INTMEM +
5594 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5595 ((u32 *)&tstorm_client)[0]);
5596 REG_WR(bp, BAR_TSTRORM_INTMEM +
5597 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5598 ((u32 *)&tstorm_client)[1]);
5599 }
5600
5601/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5602 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5603}
5604
5605static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5606{
5607 int mode = bp->rx_mode;
5608 int port = bp->port;
5609 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5610 int i;
5611
5612 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5613
5614 switch (mode) {
5615 case BNX2X_RX_MODE_NONE: /* no Rx */
5616 tstorm_mac_filter.ucast_drop_all = 1;
5617 tstorm_mac_filter.mcast_drop_all = 1;
5618 tstorm_mac_filter.bcast_drop_all = 1;
5619 break;
5620 case BNX2X_RX_MODE_NORMAL:
5621 tstorm_mac_filter.bcast_accept_all = 1;
5622 break;
5623 case BNX2X_RX_MODE_ALLMULTI:
5624 tstorm_mac_filter.mcast_accept_all = 1;
5625 tstorm_mac_filter.bcast_accept_all = 1;
5626 break;
5627 case BNX2X_RX_MODE_PROMISC:
5628 tstorm_mac_filter.ucast_accept_all = 1;
5629 tstorm_mac_filter.mcast_accept_all = 1;
5630 tstorm_mac_filter.bcast_accept_all = 1;
5631 break;
5632 default:
5633 BNX2X_ERR("bad rx mode (%d)\n", mode);
5634 }
5635
5636 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5637 REG_WR(bp, BAR_TSTRORM_INTMEM +
5638 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5639 ((u32 *)&tstorm_mac_filter)[i]);
5640
5641/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5642 ((u32 *)&tstorm_mac_filter)[i]); */
5643 }
5644
5645 if (mode != BNX2X_RX_MODE_NONE)
5646 bnx2x_set_client_config(bp);
5647}
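/* Summary of the filtering flags programmed above, per Rx mode:
 *
 *	mode        unicast      multicast    broadcast
 *	NONE        drop_all     drop_all     drop_all
 *	NORMAL      -            -            accept_all
 *	ALLMULTI    -            accept_all   accept_all
 *	PROMISC     accept_all   accept_all   accept_all
 *
 * where "-" leaves both flags clear, so presumably only frames
 * matching the CAM entries (see bnx2x_set_mac_addr() below) pass.
 */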
5648
5649static void bnx2x_init_internal(struct bnx2x *bp)
5650{
5651 int port = bp->port;
5652 struct tstorm_eth_function_common_config tstorm_config = {0};
5653 struct stats_indication_flags stats_flags = {0};
5654
5655 if (is_multi(bp)) {
5656 tstorm_config.config_flags = MULTI_FLAGS;
5657 tstorm_config.rss_result_mask = MULTI_MASK;
5658 }
5659
5660 REG_WR(bp, BAR_TSTRORM_INTMEM +
5661 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5662 (*(u32 *)&tstorm_config));
5663
5664/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5665 (*(u32 *)&tstorm_config)); */
5666
5667 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5668 bnx2x_set_storm_rx_mode(bp);
5669
5670 stats_flags.collect_eth = cpu_to_le32(1);
5671
5672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5673 ((u32 *)&stats_flags)[0]);
5674 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5675 ((u32 *)&stats_flags)[1]);
5676
5677 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5678 ((u32 *)&stats_flags)[0]);
5679 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5680 ((u32 *)&stats_flags)[1]);
5681
5682 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5683 ((u32 *)&stats_flags)[0]);
5684 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5685 ((u32 *)&stats_flags)[1]);
5686
5687/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5688 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5689}
5690
5691static void bnx2x_nic_init(struct bnx2x *bp)
5692{
5693 int i;
5694
5695 for_each_queue(bp, i) {
5696 struct bnx2x_fastpath *fp = &bp->fp[i];
5697
5698 fp->state = BNX2X_FP_STATE_CLOSED;
5699 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5700 bp, fp->status_blk, i);
5701 fp->index = i;
5702 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5703 }
5704
5705 bnx2x_init_def_sb(bp, bp->def_status_blk,
5706 bp->def_status_blk_mapping, 0x10);
5707 bnx2x_update_coalesce(bp);
5708 bnx2x_init_rx_rings(bp);
5709 bnx2x_init_tx_ring(bp);
5710 bnx2x_init_sp_ring(bp);
5711 bnx2x_init_context(bp);
5712 bnx2x_init_internal(bp);
5713 bnx2x_init_stats(bp);
5714 bnx2x_init_ind_table(bp);
5715 bnx2x_enable_int(bp);
5716
5717}
5718
5719/* end of nic init */
5720
5721/*
5722 * gzip service functions
5723 */
5724
5725static int bnx2x_gunzip_init(struct bnx2x *bp)
5726{
5727 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5728 &bp->gunzip_mapping);
5729 if (bp->gunzip_buf == NULL)
5730 goto gunzip_nomem1;
5731
5732 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5733 if (bp->strm == NULL)
5734 goto gunzip_nomem2;
5735
5736 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5737 GFP_KERNEL);
5738 if (bp->strm->workspace == NULL)
5739 goto gunzip_nomem3;
5740
5741 return 0;
5742
5743gunzip_nomem3:
5744 kfree(bp->strm);
5745 bp->strm = NULL;
5746
5747gunzip_nomem2:
5748 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5749 bp->gunzip_mapping);
5750 bp->gunzip_buf = NULL;
5751
5752gunzip_nomem1:
5753	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5754	       " decompression\n", bp->dev->name);
5755 return -ENOMEM;
5756}
5757
5758static void bnx2x_gunzip_end(struct bnx2x *bp)
5759{
5760 kfree(bp->strm->workspace);
5761
5762 kfree(bp->strm);
5763 bp->strm = NULL;
5764
5765 if (bp->gunzip_buf) {
5766 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5767 bp->gunzip_mapping);
5768 bp->gunzip_buf = NULL;
5769 }
5770}
5771
5772static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5773{
5774 int n, rc;
5775
5776 /* check gzip header */
5777 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5778 return -EINVAL;
5779
5780 n = 10;
5781
5782#define FNAME 0x8
5783
5784 if (zbuf[3] & FNAME)
5785 while ((zbuf[n++] != 0) && (n < len));
5786
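/* Sketch of the gzip header being skipped above (RFC 1952 layout):
 *
 *	byte 0-1: magic 0x1f 0x8b
 *	byte 2:   compression method, Z_DEFLATED (8)
 *	byte 3:   flags, e.g. FNAME (0x08)
 *	byte 4-9: mtime, XFL, OS
 *	byte 10+: NUL-terminated original file name, iff FNAME is set
 *
 * so n ends up pointing at the raw deflate stream, which is what
 * zlib_inflateInit2() with a negative window size expects below.
 */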
5787 bp->strm->next_in = zbuf + n;
5788 bp->strm->avail_in = len - n;
5789 bp->strm->next_out = bp->gunzip_buf;
5790 bp->strm->avail_out = FW_BUF_SIZE;
5791
5792 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5793 if (rc != Z_OK)
5794 return rc;
5795
5796 rc = zlib_inflate(bp->strm, Z_FINISH);
5797 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5798 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5799 bp->dev->name, bp->strm->msg);
5800
5801 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5802 if (bp->gunzip_outlen & 0x3)
5803 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5804 " gunzip_outlen (%d) not aligned\n",
5805 bp->dev->name, bp->gunzip_outlen);
5806 bp->gunzip_outlen >>= 2;
5807
5808 zlib_inflateEnd(bp->strm);
5809
5810 if (rc == Z_STREAM_END)
5811 return 0;
5812
5813 return rc;
5814}
5815
5816/* nic load/unload */
5817
5818/*
5819 * general service functions
5820 */
5821
5822/* send a NIG loopback debug packet */
5823static void bnx2x_lb_pckt(struct bnx2x *bp)
5824{
5825#ifdef USE_DMAE
5826 u32 wb_write[3];
5827#endif
5828
5829 /* Ethernet source and destination addresses */
5830#ifdef USE_DMAE
5831 wb_write[0] = 0x55555555;
5832 wb_write[1] = 0x55555555;
5833 wb_write[2] = 0x20; /* SOP */
5834 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5835#else
5836 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5837 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5838 /* SOP */
5839 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5840#endif
5841
5842 /* NON-IP protocol */
5843#ifdef USE_DMAE
5844 wb_write[0] = 0x09000000;
5845 wb_write[1] = 0x55555555;
5846 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5847 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5848#else
5849 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5850 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5851 /* EOP, eop_bvalid = 0 */
5852 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5853#endif
5854}
5855
5856/* some of the internal memories
5857 * are not directly readable from the driver,
5858 * so to test them we send debug packets
5859 */
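/* Rough outline of the test (derived from the code below):
 *   1. isolate the parser: disable its neighbor blocks' inputs and
 *      zero its CFC search credits
 *   2. inject loopback debug packets via bnx2x_lb_pckt()
 *   3. poll the NIG/PRS packet counters for the expected counts
 *   4. restore the credits, reset BRB/PRS and re-enable the inputs
 * An unexpected counter value is taken as an internal memory failure.
 */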
5860static int bnx2x_int_mem_test(struct bnx2x *bp)
5861{
5862 int factor;
5863 int count, i;
5864 u32 val = 0;
5865
5866 switch (CHIP_REV(bp)) {
5867 case CHIP_REV_EMUL:
5868 factor = 200;
5869 break;
5870 case CHIP_REV_FPGA:
5871 factor = 120;
5872 break;
5873 default:
5874 factor = 1;
5875 break;
5876 }
5877
5878 DP(NETIF_MSG_HW, "start part1\n");
5879
5880 /* Disable inputs of parser neighbor blocks */
5881 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5882 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5883 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5884 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5885
5886 /* Write 0 to parser credits for CFC search request */
5887 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5888
5889 /* send Ethernet packet */
5890 bnx2x_lb_pckt(bp);
5891
5892 	/* TODO: do we need to reset the NIG statistics? */
5893 /* Wait until NIG register shows 1 packet of size 0x10 */
5894 count = 1000 * factor;
5895 while (count) {
5896#ifdef BNX2X_DMAE_RD
5897 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5898 val = *bnx2x_sp(bp, wb_data[0]);
5899#else
5900 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5901 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5902#endif
5903 if (val == 0x10)
5904 break;
5905
5906 msleep(10);
5907 count--;
5908 }
5909 if (val != 0x10) {
5910 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5911 return -1;
5912 }
5913
5914 /* Wait until PRS register shows 1 packet */
5915 count = 1000 * factor;
5916 while (count) {
5917 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5918
5919 if (val == 1)
5920 break;
5921
5922 msleep(10);
5923 count--;
5924 }
5925 if (val != 0x1) {
5926 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5927 return -2;
5928 }
5929
5930 /* Reset and init BRB, PRS */
5931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5932 msleep(50);
5933 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5934 msleep(50);
5935 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5936 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5937
5938 DP(NETIF_MSG_HW, "part2\n");
5939
5940 /* Disable inputs of parser neighbor blocks */
5941 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5942 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5943 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5944 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5945
5946 /* Write 0 to parser credits for CFC search request */
5947 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5948
5949 /* send 10 Ethernet packets */
5950 for (i = 0; i < 10; i++)
5951 bnx2x_lb_pckt(bp);
5952
5953 /* Wait until NIG register shows 10 + 1
5954 packets of size 11*0x10 = 0xb0 */
5955 count = 1000 * factor;
5956 while (count) {
5957#ifdef BNX2X_DMAE_RD
5958 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5959 val = *bnx2x_sp(bp, wb_data[0]);
5960#else
5961 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5962 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5963#endif
5964 if (val == 0xb0)
5965 break;
5966
5967 msleep(10);
5968 count--;
5969 }
5970 if (val != 0xb0) {
5971 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5972 return -3;
5973 }
5974
5975 /* Wait until PRS register shows 2 packets */
5976 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5977 if (val != 2)
5978 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5979
5980 /* Write 1 to parser credits for CFC search request */
5981 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5982
5983 /* Wait until PRS register shows 3 packets */
5984 msleep(10 * factor);
5985 /* Wait until NIG register shows 1 packet of size 0x10 */
5986 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5987 if (val != 3)
5988 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5989
5990 /* clear NIG EOP FIFO */
5991 for (i = 0; i < 11; i++)
5992 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5993 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5994 if (val != 1) {
5995 BNX2X_ERR("clear of NIG failed\n");
5996 return -4;
5997 }
5998
5999 /* Reset and init BRB, PRS, NIG */
6000 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6001 msleep(50);
6002 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6003 msleep(50);
6004 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6005 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6006#ifndef BCM_ISCSI
6007 /* set NIC mode */
6008 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6009#endif
6010
6011 /* Enable inputs of parser neighbor blocks */
6012 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6013 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6014 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6015 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6016
6017 DP(NETIF_MSG_HW, "done\n");
6018
6019 return 0; /* OK */
6020}
6021
6022static void enable_blocks_attention(struct bnx2x *bp)
6023{
6024 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6025 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6026 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6027 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6028 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6029 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6030 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6031 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6032 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6033/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6034/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6035 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6036 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6037 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6038/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6039/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6040 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6041 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6042 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6043 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6044/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6045/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6046 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6047 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6048 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6049 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6050/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6051/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6052 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6053 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6054/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6055 	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
6056}
6057
6058static int bnx2x_function_init(struct bnx2x *bp, int mode)
6059{
6060 int func = bp->port;
6061 int port = func ? PORT1 : PORT0;
6062 u32 val, i;
6063#ifdef USE_DMAE
6064 u32 wb_write[2];
6065#endif
6066
6067 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6068 if ((func != 0) && (func != 1)) {
6069 BNX2X_ERR("BAD function number (%d)\n", func);
6070 return -ENODEV;
6071 }
6072
6073 bnx2x_gunzip_init(bp);
6074
6075 if (mode & 0x1) { /* init common */
6076 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6077 func, mode);
6078 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6079 0xffffffff);
6080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6081 0xfffc);
6082 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6083
6084 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6085 msleep(30);
6086 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6087
6088 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6089 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6090
6091 bnx2x_init_pxp(bp);
6092
6093 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6094 /* enable HW interrupt from PXP on USDM
6095 overflow bit 16 on INT_MASK_0 */
6096 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6097 }
6098
6099#ifdef __BIG_ENDIAN
6100 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6101 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6103 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6104 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6105 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6106
6107/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6108 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6109 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6110 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6111 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6112#endif
6113
6114#ifndef BCM_ISCSI
6115 /* set NIC mode */
6116 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6117#endif
6118
6119 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6120#ifdef BCM_ISCSI
6121 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6122 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6123 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6124#endif
6125
6126 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6127
6128 		/* let the HW do its magic ... */
6129 msleep(100);
6130 /* finish PXP init
6131 (can be moved up if we want to use the DMAE) */
6132 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6133 if (val != 1) {
6134 BNX2X_ERR("PXP2 CFG failed\n");
6135 return -EBUSY;
6136 }
6137
6138 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6139 if (val != 1) {
6140 BNX2X_ERR("PXP2 RD_INIT failed\n");
6141 return -EBUSY;
6142 }
6143
6144 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6145 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6146
6147 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6148
6149 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6150 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6151 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6152 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6153
6154#ifdef BNX2X_DMAE_RD
6155 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6157 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6158 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6159#else
6160 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6161 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6162 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6163 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6164 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6165 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6166 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6167 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6168 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6169 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6170 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6171 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6172#endif
6173 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6174 		/* soft reset pulse */
6175 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6176 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6177
6178#ifdef BCM_ISCSI
6179 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6180#endif
6181 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6182 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6183 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6184 /* enable hw interrupt from doorbell Q */
6185 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6186 }
6187
6188 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6189
6190 if (CHIP_REV_IS_SLOW(bp)) {
6191 /* fix for emulation and FPGA for no pause */
6192 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6193 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6194 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6195 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6196 }
6197
6198 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6199
6200 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6201 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6202 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6203 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6204
6205 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6206 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6207 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6208 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6209
6210 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6211 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6212 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6213 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6214
6215 /* sync semi rtc */
6216 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6217 0x80000000);
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6219 0x80000000);
6220
6221 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6222 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6223 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6224
6225 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6226 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6227 REG_WR(bp, i, 0xc0cac01a);
6228 			/* TODO: replace with something meaningful */
6229 }
6230 /* SRCH COMMON comes here */
6231 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6232
6233 if (sizeof(union cdu_context) != 1024) {
6234 /* we currently assume that a context is 1024 bytes */
6235 printk(KERN_ALERT PFX "please adjust the size of"
6236 " cdu_context(%ld)\n",
6237 (long)sizeof(union cdu_context));
6238 }
6239 val = (4 << 24) + (0 << 12) + 1024;
6240 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6241 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6242
6243 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6244 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6245
6246 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6247 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6248 MISC_AEU_COMMON_END);
6249 /* RXPCS COMMON comes here */
6250 /* EMAC0 COMMON comes here */
6251 /* EMAC1 COMMON comes here */
6252 /* DBU COMMON comes here */
6253 /* DBG COMMON comes here */
6254 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6255
6256 if (CHIP_REV_IS_SLOW(bp))
6257 msleep(200);
6258
6259 /* finish CFC init */
6260 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6261 if (val != 1) {
6262 BNX2X_ERR("CFC LL_INIT failed\n");
6263 return -EBUSY;
6264 }
6265
6266 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6267 if (val != 1) {
6268 BNX2X_ERR("CFC AC_INIT failed\n");
6269 return -EBUSY;
6270 }
6271
6272 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6273 if (val != 1) {
6274 BNX2X_ERR("CFC CAM_INIT failed\n");
6275 return -EBUSY;
6276 }
6277
6278 REG_WR(bp, CFC_REG_DEBUG0, 0);
6279
6280 /* read NIG statistic
6281 to see if this is our first up since powerup */
6282#ifdef BNX2X_DMAE_RD
6283 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6284 val = *bnx2x_sp(bp, wb_data[0]);
6285#else
6286 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6287 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6288#endif
6289 /* do internal memory self test */
6290 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6291 BNX2X_ERR("internal mem selftest failed\n");
6292 return -EBUSY;
6293 }
6294
6295 /* clear PXP2 attentions */
6296 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6297
6298 enable_blocks_attention(bp);
6299 /* enable_blocks_parity(bp); */
6300
6301 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6302 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6303 /* Fan failure is indicated by SPIO 5 */
6304 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6305 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6306
6307 /* set to active low mode */
6308 val = REG_RD(bp, MISC_REG_SPIO_INT);
6309 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6310 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6311 REG_WR(bp, MISC_REG_SPIO_INT, val);
6312
6313 /* enable interrupt to signal the IGU */
6314 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6315 val |= (1 << MISC_REGISTERS_SPIO_5);
6316 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6317 break;
6318
6319 default:
6320 break;
6321 }
6322
6323 } /* end of common init */
6324
6325 /* per port init */
6326
6327 	/* the phys address is shifted right 12 bits and a
6328 	   1=valid bit is added at the 53rd bit;
6329 	   then, since this is a wide register(TM),
6330 	   we split it into two 32-bit writes
6331 	 */
6332#define RQ_ONCHIP_AT_PORT_SIZE 384
6333#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6334#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6335#define PXP_ONE_ILT(x) ((x << 10) | x)
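/* Worked example with a hypothetical DMA address, for illustration:
 *
 *	x = 0x0000001234567000ULL
 *	ONCHIP_ADDR1(x) = (x >> 12) & 0xFFFFFFFF = 0x01234567
 *	ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44)  = 0x00100000
 *
 * the (1 << 20) in the high word is the valid bit: 20 + 32 = bit 52,
 * the "53rd bit" of the comment above.  PXP_ONE_ILT() packs the same
 * ILT index into both halves of its register, e.g. for port 1:
 *
 *	PXP_ONE_ILT(384) = (384 << 10) | 384 = 0x60180
 */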
6336
6337 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6338
6339 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6340
6341 /* Port PXP comes here */
6342 /* Port PXP2 comes here */
6343
6344 /* Offset is
6345 * Port0 0
6346 * Port1 384 */
6347 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6348#ifdef USE_DMAE
6349 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6350 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6351 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6352#else
6353 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6354 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6355 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6356 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6357#endif
6358 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6359
6360#ifdef BCM_ISCSI
6361 /* Port0 1
6362 * Port1 385 */
6363 i++;
6364 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6365 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6366 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6367 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6368
6369 /* Port0 2
6370 * Port1 386 */
6371 i++;
6372 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6373 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6374 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6375 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6376
6377 /* Port0 3
6378 * Port1 387 */
6379 i++;
6380 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6381 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6382 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6383 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6384#endif
6385
6386 /* Port TCM comes here */
6387 /* Port UCM comes here */
6388 /* Port CCM comes here */
6389 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6390 func ? XCM_PORT1_END : XCM_PORT0_END);
6391
6392#ifdef USE_DMAE
6393 wb_write[0] = 0;
6394 wb_write[1] = 0;
6395#endif
6396 for (i = 0; i < 32; i++) {
6397 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6398#ifdef USE_DMAE
6399 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6400#else
6401 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6402 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6403#endif
6404 }
6405 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6406
6407 /* Port QM comes here */
6408
6409#ifdef BCM_ISCSI
6410 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6411 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6412
6413 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6414 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6415#endif
6416 /* Port DQ comes here */
6417 /* Port BRB1 comes here */
6418 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6419 func ? PRS_PORT1_END : PRS_PORT0_END);
6420 /* Port TSDM comes here */
6421 /* Port CSDM comes here */
6422 /* Port USDM comes here */
6423 /* Port XSDM comes here */
6424 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6425 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6426 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6427 func ? USEM_PORT1_END : USEM_PORT0_END);
6428 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6429 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6430 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6431 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6432 /* Port UPB comes here */
6433 /* Port XSDM comes here */
6434 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6435 func ? PBF_PORT1_END : PBF_PORT0_END);
6436
6437 /* configure PBF to work without PAUSE mtu 9000 */
6438 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6439
6440 /* update threshold */
6441 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6442 /* update init credit */
6443 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
6444
6445 /* probe changes */
6446 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6447 msleep(5);
6448 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6449
6450#ifdef BCM_ISCSI
6451 /* tell the searcher where the T2 table is */
6452 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6453
6454 wb_write[0] = U64_LO(bp->t2_mapping);
6455 wb_write[1] = U64_HI(bp->t2_mapping);
6456 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6457 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6458 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6459 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6460
6461 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6462 /* Port SRCH comes here */
6463#endif
6464 /* Port CDU comes here */
6465 /* Port CFC comes here */
6466 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6467 func ? HC_PORT1_END : HC_PORT0_END);
6468 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6469 MISC_AEU_PORT0_START,
6470 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6471 /* Port PXPCS comes here */
6472 /* Port EMAC0 comes here */
6473 /* Port EMAC1 comes here */
6474 /* Port DBU comes here */
6475 /* Port DBG comes here */
6476 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6477 func ? NIG_PORT1_END : NIG_PORT0_END);
6478 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6479 /* Port MCP comes here */
6480 /* Port DMAE comes here */
6481
6482 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6483 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6484 /* add SPIO 5 to group 0 */
6485 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6486 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6487 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6488 break;
6489
6490 default:
6491 break;
6492 }
6493
6494 bnx2x_link_reset(bp);
6495
6496 	/* Reset PCIE errors for debug */
6497 REG_WR(bp, 0x2114, 0xffffffff);
6498 REG_WR(bp, 0x2120, 0xffffffff);
6499 REG_WR(bp, 0x2814, 0xffffffff);
6500
6501 /* !!! move to init_values.h */
6502 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6503 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6504 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6505 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6506
6507 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6508 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6509 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6510 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6511
6512 bnx2x_gunzip_end(bp);
6513
6514 if (!nomcp) {
6515 port = bp->port;
6516
6517 bp->fw_drv_pulse_wr_seq =
6518 			(SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6519 			 DRV_PULSE_SEQ_MASK);
6520 		bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6521 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6522 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6523 } else {
6524 bp->fw_mb = 0;
6525 }
6526
6527 return 0;
6528}
6529
6530/* send the MCP a request, block until there is a reply */
6531static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6532{
6533 	int port = bp->port;
6534 u32 seq = ++bp->fw_seq;
6535 u32 rc = 0;
6536
6537 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6538 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6539
6540 	/* let the FW do its magic ... */
6541 msleep(100); /* TBD */
6542
6543 if (CHIP_REV_IS_SLOW(bp))
6544 msleep(900);
6545
6546 	rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6547 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6548
6549 /* is this a reply to our command? */
6550 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6551 rc &= FW_MSG_CODE_MASK;
6552
6553 } else {
6554 /* FW BUG! */
6555 BNX2X_ERR("FW failed to respond!\n");
6556 bnx2x_fw_dump(bp);
6557 rc = 0;
6558 }
6559
6560 return rc;
6561}
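/* Typical exchange, as used by bnx2x_nic_load() below: the driver
 * writes (DRV_MSG_CODE_LOAD_REQ | seq) into drv_mb_header, and the MCP
 * is expected to answer in fw_mb_header with the same sequence number
 * plus a code such as FW_MSG_CODE_DRV_LOAD_COMMON or
 * FW_MSG_CODE_DRV_LOAD_REFUSED.  A sequence mismatch is treated as a
 * firmware failure and 0 is returned to the caller.
 */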
6562
6563static void bnx2x_free_mem(struct bnx2x *bp)
6564{
6565
6566#define BNX2X_PCI_FREE(x, y, size) \
6567 do { \
6568 if (x) { \
6569 pci_free_consistent(bp->pdev, size, x, y); \
6570 x = NULL; \
6571 y = 0; \
6572 } \
6573 } while (0)
6574
6575#define BNX2X_FREE(x) \
6576 do { \
6577 if (x) { \
6578 vfree(x); \
6579 x = NULL; \
6580 } \
6581 } while (0)
6582
6583 int i;
6584
6585 /* fastpath */
6586 for_each_queue(bp, i) {
6587
6588 /* Status blocks */
6589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6590 bnx2x_fp(bp, i, status_blk_mapping),
6591 sizeof(struct host_status_block) +
6592 sizeof(struct eth_tx_db_data));
6593
6594 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6595 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6596 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6597 bnx2x_fp(bp, i, tx_desc_mapping),
6598 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6599
6600 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6601 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6602 bnx2x_fp(bp, i, rx_desc_mapping),
6603 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6604
6605 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6606 bnx2x_fp(bp, i, rx_comp_mapping),
6607 sizeof(struct eth_fast_path_rx_cqe) *
6608 NUM_RCQ_BD);
6609 }
6610
6611 BNX2X_FREE(bp->fp);
6612
6613 /* end of fastpath */
6614
6615 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6616 (sizeof(struct host_def_status_block)));
6617
6618 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6619 (sizeof(struct bnx2x_slowpath)));
6620
6621#ifdef BCM_ISCSI
6622 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6623 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6624 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6625 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6626#endif
6627 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6628
6629#undef BNX2X_PCI_FREE
6630#undef BNX2X_FREE
6631}
6632
6633static int bnx2x_alloc_mem(struct bnx2x *bp)
6634{
6635
6636#define BNX2X_PCI_ALLOC(x, y, size) \
6637 do { \
6638 x = pci_alloc_consistent(bp->pdev, size, y); \
6639 if (x == NULL) \
6640 goto alloc_mem_err; \
6641 memset(x, 0, size); \
6642 } while (0)
6643
6644#define BNX2X_ALLOC(x, size) \
6645 do { \
6646 x = vmalloc(size); \
6647 if (x == NULL) \
6648 goto alloc_mem_err; \
6649 memset(x, 0, size); \
6650 } while (0)
6651
6652 int i;
6653
6654 /* fastpath */
6655 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6656
6657 for_each_queue(bp, i) {
6658 bnx2x_fp(bp, i, bp) = bp;
6659
6660 /* Status blocks */
6661 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6662 &bnx2x_fp(bp, i, status_blk_mapping),
6663 sizeof(struct host_status_block) +
6664 sizeof(struct eth_tx_db_data));
6665
6666 bnx2x_fp(bp, i, hw_tx_prods) =
6667 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6668
6669 bnx2x_fp(bp, i, tx_prods_mapping) =
6670 bnx2x_fp(bp, i, status_blk_mapping) +
6671 sizeof(struct host_status_block);
6672
6673 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6674 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6675 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6676 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6677 &bnx2x_fp(bp, i, tx_desc_mapping),
6678 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6679
6680 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6681 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6682 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6683 &bnx2x_fp(bp, i, rx_desc_mapping),
6684 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6685
6686 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6687 &bnx2x_fp(bp, i, rx_comp_mapping),
6688 sizeof(struct eth_fast_path_rx_cqe) *
6689 NUM_RCQ_BD);
6690
6691 }
6692 /* end of fastpath */
6693
6694 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6695 sizeof(struct host_def_status_block));
6696
6697 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6698 sizeof(struct bnx2x_slowpath));
6699
6700#ifdef BCM_ISCSI
6701 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6702
6703 /* Initialize T1 */
6704 for (i = 0; i < 64*1024; i += 64) {
6705 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6706 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6707 }
6708
6709 /* allocate searcher T2 table
6710 we allocate 1/4 of alloc num for T2
6711 (which is not entered into the ILT) */
6712 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6713
6714 /* Initialize T2 */
6715 for (i = 0; i < 16*1024; i += 64)
6716 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6717
6718 	/* now fixup the last line in the block to point to the next block */
6719 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6720
6721 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6722 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6723
6724 /* QM queues (128*MAX_CONN) */
6725 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6726#endif
6727
6728 /* Slow path ring */
6729 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6730
6731 return 0;
6732
6733alloc_mem_err:
6734 bnx2x_free_mem(bp);
6735 return -ENOMEM;
6736
6737#undef BNX2X_PCI_ALLOC
6738#undef BNX2X_ALLOC
6739}
6740
6741static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6742{
6743 int i;
6744
6745 for_each_queue(bp, i) {
6746 struct bnx2x_fastpath *fp = &bp->fp[i];
6747
6748 u16 bd_cons = fp->tx_bd_cons;
6749 u16 sw_prod = fp->tx_pkt_prod;
6750 u16 sw_cons = fp->tx_pkt_cons;
6751
6752 BUG_TRAP(fp->tx_buf_ring != NULL);
6753
6754 while (sw_cons != sw_prod) {
6755 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6756 sw_cons++;
6757 }
6758 }
6759}
6760
6761static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6762{
6763 int i, j;
6764
6765 for_each_queue(bp, j) {
6766 struct bnx2x_fastpath *fp = &bp->fp[j];
6767
6768 BUG_TRAP(fp->rx_buf_ring != NULL);
6769
6770 for (i = 0; i < NUM_RX_BD; i++) {
6771 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6772 struct sk_buff *skb = rx_buf->skb;
6773
6774 if (skb == NULL)
6775 continue;
6776
6777 pci_unmap_single(bp->pdev,
6778 pci_unmap_addr(rx_buf, mapping),
6779 bp->rx_buf_use_size,
6780 PCI_DMA_FROMDEVICE);
6781
6782 rx_buf->skb = NULL;
6783 dev_kfree_skb(skb);
6784 }
6785 }
6786}
6787
6788static void bnx2x_free_skbs(struct bnx2x *bp)
6789{
6790 bnx2x_free_tx_skbs(bp);
6791 bnx2x_free_rx_skbs(bp);
6792}
6793
6794static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6795{
6796 int i;
6797
6798 free_irq(bp->msix_table[0].vector, bp->dev);
6799 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6800 bp->msix_table[0].vector);
6801
6802 for_each_queue(bp, i) {
6803 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6804 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6805 bnx2x_fp(bp, i, state));
6806
6807 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6808
6809 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6810 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6811
6812 } else
6813 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6814
6815 }
6816
6817}
6818
6819static void bnx2x_free_irq(struct bnx2x *bp)
6820{
6821
6822 if (bp->flags & USING_MSIX_FLAG) {
6823
6824 bnx2x_free_msix_irqs(bp);
6825 pci_disable_msix(bp->pdev);
6826
6827 bp->flags &= ~USING_MSIX_FLAG;
6828
6829 } else
6830 free_irq(bp->pdev->irq, bp->dev);
6831}
6832
6833static int bnx2x_enable_msix(struct bnx2x *bp)
6834{
6835
6836 int i;
6837
6838 bp->msix_table[0].entry = 0;
6839 for_each_queue(bp, i)
6840 bp->msix_table[i + 1].entry = i + 1;
6841
6842 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6843 			     bp->num_queues + 1)) {
6844 BNX2X_ERR("failed to enable msix\n");
6845 return -1;
6846
6847 }
6848
6849 bp->flags |= USING_MSIX_FLAG;
6850
6851 return 0;
6852
6853}
6854
6855
6856static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6857{
6858
6859 int i, rc;
6860
6861 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6862
6863 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6864 bp->dev->name, bp->dev);
6865
6866 if (rc) {
6867 BNX2X_ERR("request sp irq failed\n");
6868 return -EBUSY;
6869 }
6870
6871 for_each_queue(bp, i) {
6872 rc = request_irq(bp->msix_table[i + 1].vector,
6873 bnx2x_msix_fp_int, 0,
6874 bp->dev->name, &bp->fp[i]);
6875
6876 if (rc) {
6877 BNX2X_ERR("request fp #%d irq failed\n", i);
6878 bnx2x_free_msix_irqs(bp);
6879 return -EBUSY;
6880 }
6881
6882 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6883
6884 }
6885
6886 return 0;
6887
6888}
6889
6890static int bnx2x_req_irq(struct bnx2x *bp)
6891{
6892
6893 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6894 IRQF_SHARED, bp->dev->name, bp->dev);
6895 if (!rc)
6896 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6897
6898 return rc;
6899
6900}
6901
6902/*
6903 * Init service functions
6904 */
6905
6906static void bnx2x_set_mac_addr(struct bnx2x *bp)
6907{
6908 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6909
6910 /* CAM allocation
6911 * unicasts 0-31:port0 32-63:port1
6912 * multicast 64-127:port0 128-191:port1
6913 */
6914 config->hdr.length_6b = 2;
6915 config->hdr.offset = bp->port ? 31 : 0;
6916 config->hdr.reserved0 = 0;
6917 config->hdr.reserved1 = 0;
6918
6919 /* primary MAC */
6920 config->config_table[0].cam_entry.msb_mac_addr =
6921 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6922 config->config_table[0].cam_entry.middle_mac_addr =
6923 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6924 config->config_table[0].cam_entry.lsb_mac_addr =
6925 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6926 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6927 config->config_table[0].target_table_entry.flags = 0;
6928 config->config_table[0].target_table_entry.client_id = 0;
6929 config->config_table[0].target_table_entry.vlan_id = 0;
6930
6931 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6932 config->config_table[0].cam_entry.msb_mac_addr,
6933 config->config_table[0].cam_entry.middle_mac_addr,
6934 config->config_table[0].cam_entry.lsb_mac_addr);
6935
6936 /* broadcast */
6937 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6938 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6939 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6940 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6941 config->config_table[1].target_table_entry.flags =
6942 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6943 config->config_table[1].target_table_entry.client_id = 0;
6944 config->config_table[1].target_table_entry.vlan_id = 0;
6945
6946 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6947 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6948 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6949}
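/* Sketch of the CAM packing above for a hypothetical MAC address
 * 00:0e:0c:11:22:33 on a little-endian host:
 *
 *	msb_mac_addr    = swab16(0x0e00) = 0x000e
 *	middle_mac_addr = swab16(0x110c) = 0x0c11
 *	lsb_mac_addr    = swab16(0x3322) = 0x2233
 *
 * i.e. each 16-bit CAM field carries two address octets in wire order.
 */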
6950
6951static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6952 int *state_p, int poll)
6953{
6954 /* can take a while if any port is running */
6955 int timeout = 500;
6956
6957 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6958 poll ? "polling" : "waiting", state, idx);
6959
6960 might_sleep();
6961
6962 while (timeout) {
6963
6964 if (poll) {
6965 bnx2x_rx_int(bp->fp, 10);
6966 			/* if the index is different from 0,
6967 			 * the reply for some commands will
6968 			 * arrive on a non-default queue
6969 			 */
6970 if (idx)
6971 bnx2x_rx_int(&bp->fp[idx], 10);
6972 }
6973
6974 mb(); /* state is changed by bnx2x_sp_event()*/
6975
6976 		if (*state_p == state)
6977 return 0;
6978
6979 timeout--;
6980 msleep(1);
6981
6982 }
6983
6984 	/* timeout! */
6985 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6986 poll ? "polling" : "waiting", state, idx);
6987
6988 	return -EBUSY;
6989}
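/* Usage sketch: *state_p is updated asynchronously by the slowpath
 * event handler, so callers post a ramrod and then spin here, e.g.
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &bp->state, 0);
 *
 * which returns 0 once the state is reached (within roughly 500 ms of
 * polling) and -EBUSY on timeout.
 */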
6990
6991static int bnx2x_setup_leading(struct bnx2x *bp)
6992{
6993
6994 	/* reset IGU state */
6995 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6996
6997 /* SETUP ramrod */
6998 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6999
7000 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7001
7002}
7003
7004static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7005{
7006
7007 /* reset IGU state */
7008 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7009
7010 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
7011 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7012
7013 /* Wait for completion */
7014 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7015 &(bp->fp[index].state), 1);
7016
7017}
7018
7019
7020static int bnx2x_poll(struct napi_struct *napi, int budget);
7021static void bnx2x_set_rx_mode(struct net_device *dev);
7022
7023static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7024{
7025 int rc;
7026 int i = 0;
7027
7028 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7029
7030 /* Send LOAD_REQUEST command to MCP.
7031 	   Returns the type of LOAD command: if this is the
7032 	   first port to be initialized, common blocks should be
7033 	   initialized as well; otherwise not.
7034 */
7035 if (!nomcp) {
7036 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7037 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7038 return -EBUSY; /* other port in diagnostic mode */
7039 }
7040 } else {
7041 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7042 }
7043
7044 	/* if we can't use MSI-X we only need one fp,
7045 	 * so try to enable MSI-X with the requested number of fp's
7046 	 * and fall back to INT#A with one fp
7047 	 */
7048 if (req_irq) {
7049 if (use_inta) {
7050 bp->num_queues = 1;
7051 } else {
7052 			if ((use_multi > 1) && (use_multi <= 16))
7053 /* user requested number */
7054 bp->num_queues = use_multi;
7055 else if (use_multi == 1)
7056 bp->num_queues = num_online_cpus();
7057 else
7058 bp->num_queues = 1;
7059
7060 if (bnx2x_enable_msix(bp)) {
7061 				/* failed to enable msix */
7062 bp->num_queues = 1;
7063 if (use_multi)
7064 					BNX2X_ERR("Multi requested but failed"
7065 " to enable MSI-X\n");
7066 }
7067 }
7068 }
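/* Resulting queue count for the module parameters, e.g.:
 *
 *	use_inta != 0        ->  1 queue (INT#A)
 *	use_multi == 1       ->  num_online_cpus() queues
 *	use_multi in 2..16   ->  use_multi queues
 *	otherwise            ->  1 queue
 *
 * falling back to a single queue if MSI-X cannot be enabled.
 */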
7069
7070 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7071
7072 if (bnx2x_alloc_mem(bp))
7073 return -ENOMEM;
7074
7075 if (req_irq) {
7076 if (bp->flags & USING_MSIX_FLAG) {
7077 if (bnx2x_req_msix_irqs(bp)) {
7078 pci_disable_msix(bp->pdev);
7079 goto out_error;
7080 }
7081
7082 } else {
7083 if (bnx2x_req_irq(bp)) {
7084 BNX2X_ERR("IRQ request failed, aborting\n");
7085 goto out_error;
7086 }
7087 }
7088 }
7089
7090 for_each_queue(bp, i)
7091 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7092 bnx2x_poll, 128);
7093
7094
7095 /* Initialize HW */
7096 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7097 BNX2X_ERR("HW init failed, aborting\n");
7098 goto out_error;
7099 }
7100
7101
7102 atomic_set(&bp->intr_sem, 0);
7103
7104
7105 /* Setup NIC internals and enable interrupts */
7106 bnx2x_nic_init(bp);
7107
7108 /* Send LOAD_DONE command to MCP */
7109 if (!nomcp) {
7110 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7111 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7112 if (!rc) {
7113 BNX2X_ERR("MCP response failure, unloading\n");
7114 goto int_disable;
7115 }
7116 }
7117
7118 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7119
7120 	/* Enable Rx interrupt handling before sending the ramrod,
7121 	   as its completion arrives on the Rx FP queue */
7122 for_each_queue(bp, i)
7123 napi_enable(&bnx2x_fp(bp, i, napi));
7124
7125 if (bnx2x_setup_leading(bp))
7126 goto stop_netif;
7127
7128 for_each_nondefault_queue(bp, i)
7129 if (bnx2x_setup_multi(bp, i))
7130 goto stop_netif;
7131
7132 bnx2x_set_mac_addr(bp);
7133
7134 bnx2x_phy_init(bp);
7135
7136 /* Start fast path */
7137 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7138 netif_start_queue(bp->dev);
7139 if (bp->flags & USING_MSIX_FLAG)
7140 printk(KERN_INFO PFX "%s: using MSI-X\n",
7141 bp->dev->name);
7142
7143 	/* Otherwise the Tx queue should only be re-enabled */
7144 } else if (netif_running(bp->dev)) {
7145 netif_wake_queue(bp->dev);
7146 bnx2x_set_rx_mode(bp->dev);
7147 }
7148
7149 /* start the timer */
7150 mod_timer(&bp->timer, jiffies + bp->current_interval);
7151
7152 return 0;
7153
7154stop_netif:
7155 for_each_queue(bp, i)
7156 napi_disable(&bnx2x_fp(bp, i, napi));
7157
7158int_disable:
7159 bnx2x_disable_int_sync(bp);
7160
7161 bnx2x_free_skbs(bp);
7162 bnx2x_free_irq(bp);
7163
7164out_error:
7165 bnx2x_free_mem(bp);
7166
7167 /* TBD we really need to reset the chip
7168 if we want to recover from this */
7169 return rc;
7170}
7171
7172static void bnx2x_netif_stop(struct bnx2x *bp)
7173{
7174 int i;
7175
7176 bp->rx_mode = BNX2X_RX_MODE_NONE;
7177 bnx2x_set_storm_rx_mode(bp);
7178
7179 bnx2x_disable_int_sync(bp);
7180 bnx2x_link_reset(bp);
7181
7182 for_each_queue(bp, i)
7183 napi_disable(&bnx2x_fp(bp, i, napi));
7184
7185 if (netif_running(bp->dev)) {
7186 netif_tx_disable(bp->dev);
7187 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7188 }
7189}
7190
7191static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7192{
7193 int port = bp->port;
7194#ifdef USE_DMAE
7195 u32 wb_write[2];
7196#endif
7197 int base, i;
7198
7199 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7200
7201 /* Do not rcv packets to BRB */
7202 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7203 /* Do not direct rcv packets that are not for MCP to the BRB */
7204 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7205 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7206
7207 /* Configure IGU and AEU */
7208 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7209 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7210
7211 /* TODO: Close Doorbell port? */
7212
7213 /* Clear ILT */
7214#ifdef USE_DMAE
7215 wb_write[0] = 0;
7216 wb_write[1] = 0;
7217#endif
7218 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7219 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7220#ifdef USE_DMAE
7221 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7222#else
7223 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7224 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7225#endif
7226 }
7227
7228 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7229 /* reset_common */
7230 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7231 0xd3ffff7f);
7232 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7233 0x1403);
7234 }
7235}
7236
7237static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7238{
7239
7240 int rc;
7241
7242 	/* halt the connection */
7243 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7244 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7245
7246
7247 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7248 &(bp->fp[index].state), 1);
7249 	if (rc) /* timeout */
7250 return rc;
7251
7252 /* delete cfc entry */
7253 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7254
7255 	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7256 &(bp->fp[index].state), 1);
7257
7258}
7259
7260
7261static void bnx2x_stop_leading(struct bnx2x *bp)
7262{
7263 	u16 dsb_sp_prod_idx;
7264 	/* if the other port is handling traffic,
7265 this can take a lot of time */
7266 int timeout = 500;
7267
7268 might_sleep();
7269
7270 /* Send HALT ramrod */
7271 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7272 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7273
7274 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7275 &(bp->fp[0].state), 1))
7276 return;
7277
7278 	dsb_sp_prod_idx = *bp->dsb_sp_prod;
7279
7280 /* Send CFC_DELETE ramrod */
7281 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7282
7283 	/* Wait for completion to arrive on default status block
7284 we are going to reset the chip anyway
7285 so there is not much to do if this times out
7286 */
7287 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
7288 timeout--;
7289 msleep(1);
7290 	}
7291 if (!timeout) {
7292 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7293 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7294 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7295 }
7296 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7297 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7298}
7299
7300
7301static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
7302{
7303 u32 reset_code = 0;
7304 int rc;
7305 int i;
7306
7307 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7308
7309 /* Calling flush_scheduled_work() may deadlock because
7310 * linkwatch_event() may be on the workqueue and it will try to get
7311 * the rtnl_lock which we are holding.
7312 */
7313
7314 while (bp->in_reset_task)
7315 msleep(1);
7316
7317 	/* Delete the timer: do it before disabling interrupts, as there
7318 	   may still be a STAT_QUERY ramrod pending after stopping the timer */
7319 del_timer_sync(&bp->timer);
7320
7321 /* Wait until stat ramrod returns and all SP tasks complete */
7322 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7323 msleep(1);
7324
7325 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7326 bnx2x_netif_stop(bp);
7327
7328 if (bp->flags & NO_WOL_FLAG)
7329 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7330 else if (bp->wol) {
7331 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
7332 u8 *mac_addr = bp->dev->dev_addr;
7333 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7334 EMAC_MODE_ACPI_RCVD);
7335
7336 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7337
7338 val = (mac_addr[0] << 8) | mac_addr[1];
7339 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7340
7341 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7342 (mac_addr[4] << 8) | mac_addr[5];
7343 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto error;

	bnx2x_stop_leading(bp);

error:
	if (!nomcp)
		rc = bnx2x_fw_command(bp, reset_code);
	else
		rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;

	/* Release IRQs */
	if (free_irq)
		bnx2x_free_irq(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, rc);

	/* Report UNLOAD_DONE to MCP */
	if (!nomcp)
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs and driver internals */
	bnx2x_free_skbs(bp);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;
	/* Set link down */
	bp->link_up = 0;
	netif_carrier_off(bp->dev);

	return 0;
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
{
	int port = bp->port;
	u32 ext_phy_type;

	bp->phy_flags = 0;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_TP | SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->phy_flags |= PHY_SGMII_FLAG;

			bp->supported |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_TP | SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->ext_phy_config);
			return;
		}

		bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
				      port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		bp->phy_flags |= PHY_XGXS_FLAG;

		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP | SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_Autoneg |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->ext_phy_config);
			return;
		}

		bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
				      port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);

		bp->ser_lane = ((bp->lane_config &
				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
				PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
		bp->rx_lane_swap = ((bp->lane_config &
				     PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
				    PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
		bp->tx_lane_swap = ((bp->lane_config &
				     PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
				    PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
		BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
			       bp->rx_lane_swap, bp->tx_lane_swap);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->link_config);
		return;
	}

	/* mask what we support according to speed_cap_mask */
	if (!(bp->speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->supported &= ~(SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
}

static void bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->req_autoneg = 0;
	bp->req_duplex = DUPLEX_FULL;

	switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->supported & SUPPORTED_Autoneg) {
			bp->req_autoneg |= AUTONEG_SPEED;
			bp->req_line_speed = 0;
			bp->advertising = bp->supported;
		} else {
			if (XGXS_EXT_PHY_TYPE(bp) ==
			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
				/* force 10G, no AN */
				bp->req_line_speed = SPEED_10000;
				bp->advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->supported & SUPPORTED_10baseT_Full) {
			bp->req_line_speed = SPEED_10;
			bp->advertising = (ADVERTISED_10baseT_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->supported & SUPPORTED_10baseT_Half) {
			bp->req_line_speed = SPEED_10;
			bp->req_duplex = DUPLEX_HALF;
			bp->advertising = (ADVERTISED_10baseT_Half |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->supported & SUPPORTED_100baseT_Full) {
			bp->req_line_speed = SPEED_100;
			bp->advertising = (ADVERTISED_100baseT_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->supported & SUPPORTED_100baseT_Half) {
			bp->req_line_speed = SPEED_100;
			bp->req_duplex = DUPLEX_HALF;
			bp->advertising = (ADVERTISED_100baseT_Half |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->supported & SUPPORTED_1000baseT_Full) {
			bp->req_line_speed = SPEED_1000;
			bp->advertising = (ADVERTISED_1000baseT_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->supported & SUPPORTED_2500baseX_Full) {
			bp->req_line_speed = SPEED_2500;
			bp->advertising = (ADVERTISED_2500baseX_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->supported & SUPPORTED_10000baseT_Full) {
			bp->req_line_speed = SPEED_10000;
			bp->advertising = (ADVERTISED_10000baseT_Full |
					   ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->link_config, bp->speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->link_config);
		bp->req_autoneg |= AUTONEG_SPEED;
		bp->req_line_speed = 0;
		bp->advertising = bp->supported;
		break;
	}
	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
		       bp->req_line_speed, bp->req_duplex);

	bp->req_flow_ctrl = (bp->link_config &
			     PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
	    (bp->supported & SUPPORTED_Autoneg))
		bp->req_autoneg |= AUTONEG_FLOW_CTRL;

	BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
}

static void bnx2x_get_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	int port = bp->port;
	u32 switch_cfg;

	bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->chip_id = id;
	BNX2X_DEV_INFO("chip ID is %x\n", id);

	if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
		BNX2X_DEV_INFO("MCP not active\n");
		nomcp = 1;
		goto set_mac;
	}

	val = SHMEM_RD(bp, validity_map[port]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
		      DRV_MSG_SEQ_NUMBER_MASK);

	bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
	bp->serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
	       KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
	       KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
		       " fw_seq (%08x)\n",
		       bp->hw_config, bp->board, bp->serdes_config,
		       bp->lane_config, bp->ext_phy_config,
		       bp->speed_cap_mask, bp->link_config, bp->fw_seq);

	switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, switch_cfg);

	bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
	/* for now disable cl73 */
	bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
	BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);

	/* bc ver */
	if (!nomcp) {
		bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
		BNX2X_DEV_INFO("bc_ver %X\n", val);
		if (val < BNX2X_BC_VER) {
			/* for now only warn
			 * later we might need to enforce this */
			BNX2X_ERR("This driver needs bc_ver %X but found %X,"
				  " please upgrade BC\n", BNX2X_BC_VER, val);
		}
	} else {
		bp->bc_ver = 0;
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->flash_size, bp->flash_size);

	return;

set_mac: /* only supposed to happen on emulation/FPGA */
	BNX2X_ERR("warning: random MAC workaround active\n");
	random_ether_addr(bp->dev->dev_addr);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->supported;
	cmd->advertising = bp->advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	} else {
		cmd->speed = bp->req_line_speed;
		cmd->duplex = bp->req_duplex;
	}

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->req_autoneg & AUTONEG_SPEED)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	switch (cmd->port) {
	case PORT_TP:
		if (!(bp->supported & SUPPORTED_TP)) {
			DP(NETIF_MSG_LINK, "TP not supported\n");
			return -EINVAL;
		}

		if (bp->phy_flags & PHY_XGXS_FLAG) {
			bnx2x_link_reset(bp);
			bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
			bnx2x_phy_deassert(bp);
		}
		break;

	case PORT_FIBRE:
		if (!(bp->supported & SUPPORTED_FIBRE)) {
			DP(NETIF_MSG_LINK, "FIBRE not supported\n");
			return -EINVAL;
		}

		if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
			bnx2x_link_reset(bp);
			bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
			bnx2x_phy_deassert(bp);
		}
		break;

	default:
		DP(NETIF_MSG_LINK, "Unknown port type\n");
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->supported;

		bp->req_autoneg |= AUTONEG_SPEED;
		bp->req_line_speed = 0;
		bp->req_duplex = DUPLEX_FULL;
		bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->req_autoneg &= ~AUTONEG_SPEED;
		bp->req_line_speed = cmd->speed;
		bp->req_duplex = cmd->duplex;
		bp->advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
	   bp->advertising);

	bnx2x_stop_stats(bp);
	bnx2x_link_initialize(bp);

	return 0;
}

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
		 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
		 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
		 bp->bc_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else {
		bp->wol = 0;
	}
	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
		return -EAGAIN;
	}

	bnx2x_stop_stats(bp);
	bnx2x_link_initialize(bp);

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = bp->port;
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = bp->port;
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
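			/* e.g. (illustrative, not from the original
			 * source): a word read back as 0x12345678 in cpu
			 * order is stored so that memcpy()ing *ret_val to
			 * the caller's buffer yields the bytes
			 * 12 34 56 78 in flash order.
			 */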
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(NETIF_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->flash_size) {
		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)	(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->flash_size) {
		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));
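
		/* Illustrative example (not from the original source):
		 * writing one byte at offset 0x102 reads the dword at
		 * align_offset 0x100, clears bits 23-16
		 * (BYTE_OFFSET(0x102) == 16) and merges the new byte
		 * there before writing the dword back.
		 */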

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		DP(NETIF_MSG_NVM, "val 0x%08x\n", val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1) { /* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
	}

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(NETIF_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->flash_size) {
		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);
		DP(NETIF_MSG_NVM, "val 0x%08x\n", val);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00)
		bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

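	/* Reading of the check below (an illustration, not from the
	 * original source): a worst-case packet needs one BD per
	 * fragment plus a few extra BDs (start BD, parsing BD, possible
	 * header split), so the TX ring must be strictly larger than
	 * MAX_SKB_FRAGS + 4 entries.
	 */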
	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		bnx2x_nic_unload(bp, 0);
		bnx2x_nic_load(bp, 0);
	}

	return 0;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg =
		((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	if (epause->autoneg) {
		if (!(bp->supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		bp->req_autoneg |= AUTONEG_FLOW_CTRL;
	} else
		bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;

	bp->req_flow_ctrl = FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
	    (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
		bp->req_flow_ctrl = FLOW_CTRL_NONE;

	DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
	   bp->req_autoneg, bp->req_flow_ctrl);

	bnx2x_stop_stats(bp);
	bnx2x_link_initialize(bp);

	return 0;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "MC Errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int stats_state;

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
		return;
	}

	stats_state = bp->stats_state;
	bnx2x_stop_stats(bp);

	if (bnx2x_mc_assert(bp) != 0) {
		buf[0] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
	bp->stats_state = stats_state;
}

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
	{ "rx_bytes"},			/* 0 */
	{ "rx_error_bytes"},		/* 1 */
	{ "tx_bytes"},			/* 2 */
	{ "tx_error_bytes"},		/* 3 */
	{ "rx_ucast_packets"},		/* 4 */
	{ "rx_mcast_packets"},		/* 5 */
	{ "rx_bcast_packets"},		/* 6 */
	{ "tx_ucast_packets"},		/* 7 */
	{ "tx_mcast_packets"},		/* 8 */
	{ "tx_bcast_packets"},		/* 9 */
	{ "tx_mac_errors"},		/* 10 */
	{ "tx_carrier_errors"},		/* 11 */
	{ "rx_crc_errors"},		/* 12 */
	{ "rx_align_errors"},		/* 13 */
	{ "tx_single_collisions"},	/* 14 */
	{ "tx_multi_collisions"},	/* 15 */
	{ "tx_deferred"},		/* 16 */
	{ "tx_excess_collisions"},	/* 17 */
	{ "tx_late_collisions"},	/* 18 */
	{ "tx_total_collisions"},	/* 19 */
	{ "rx_fragments"},		/* 20 */
	{ "rx_jabbers"},		/* 21 */
	{ "rx_undersize_packets"},	/* 22 */
	{ "rx_oversize_packets"},	/* 23 */
	{ "rx_xon_frames"},		/* 24 */
	{ "rx_xoff_frames"},		/* 25 */
	{ "tx_xon_frames"},		/* 26 */
	{ "tx_xoff_frames"},		/* 27 */
	{ "rx_mac_ctrl_frames"},	/* 28 */
	{ "rx_filtered_packets"},	/* 29 */
	{ "rx_discards"},		/* 30 */
};

#define STATS_OFFSET32(offset_name) \
	(offsetof(struct bnx2x_eth_stats, offset_name) / 4)

static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
	STATS_OFFSET32(total_bytes_received_hi),		/* 0 */
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),		/* 1 */
	STATS_OFFSET32(total_bytes_transmitted_hi),		/* 2 */
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),		/* 3 */
	STATS_OFFSET32(total_unicast_packets_received_hi),	/* 4 */
	STATS_OFFSET32(total_multicast_packets_received_hi),	/* 5 */
	STATS_OFFSET32(total_broadcast_packets_received_hi),	/* 6 */
	STATS_OFFSET32(total_unicast_packets_transmitted_hi),	/* 7 */
	STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */
	STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */
	STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),	/* 11 */
	STATS_OFFSET32(crc_receive_errors),			/* 12 */
	STATS_OFFSET32(alignment_errors),			/* 13 */
	STATS_OFFSET32(single_collision_transmit_frames),	/* 14 */
	STATS_OFFSET32(multiple_collision_transmit_frames),	/* 15 */
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),	/* 16 */
	STATS_OFFSET32(excessive_collision_frames),		/* 17 */
	STATS_OFFSET32(late_collision_frames),			/* 18 */
	STATS_OFFSET32(number_of_bugs_found_in_stats_spec),	/* 19 */
	STATS_OFFSET32(runt_packets_received),			/* 20 */
	STATS_OFFSET32(jabber_packets_received),		/* 21 */
	STATS_OFFSET32(error_runt_packets_received),		/* 22 */
	STATS_OFFSET32(error_jabber_packets_received),		/* 23 */
	STATS_OFFSET32(pause_xon_frames_received),		/* 24 */
	STATS_OFFSET32(pause_xoff_frames_received),		/* 25 */
	STATS_OFFSET32(pause_xon_frames_transmitted),		/* 26 */
	STATS_OFFSET32(pause_xoff_frames_transmitted),		/* 27 */
	STATS_OFFSET32(control_frames_received),		/* 28 */
	STATS_OFFSET32(mac_filter_discard),			/* 29 */
	STATS_OFFSET32(no_buff_discard),			/* 30 */
};

static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
	8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
	4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4,
};

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	return BNX2X_NUM_STATS;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
	int i;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (bnx2x_stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (!hw_stats) {
			buf[i] = 0;
			continue;
		}
		if (bnx2x_stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
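		/* e.g. (illustrative, not from the original source):
		 * hi 0x1 and lo 0x23456789 combine into the 64-bit
		 * value 0x0000000123456789 */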
		buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
				  *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			bnx2x_leds_set(bp, SPEED_1000);
		} else {
			bnx2x_leds_unset(bp);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_up)
		bnx2x_leds_set(bp, bp->line_speed);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings = bnx2x_get_settings,
	.set_settings = bnx2x_set_settings,
	.get_drvinfo = bnx2x_get_drvinfo,
	.get_wol = bnx2x_get_wol,
	.set_wol = bnx2x_set_wol,
	.get_msglevel = bnx2x_get_msglevel,
	.set_msglevel = bnx2x_set_msglevel,
	.nway_reset = bnx2x_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2x_get_eeprom_len,
	.get_eeprom = bnx2x_get_eeprom,
	.set_eeprom = bnx2x_set_eeprom,
	.get_coalesce = bnx2x_get_coalesce,
	.set_coalesce = bnx2x_set_coalesce,
	.get_ringparam = bnx2x_get_ringparam,
	.set_ringparam = bnx2x_set_ringparam,
	.get_pauseparam = bnx2x_get_pauseparam,
	.set_pauseparam = bnx2x_set_pauseparam,
	.get_rx_csum = bnx2x_get_rx_csum,
	.set_rx_csum = bnx2x_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2x_set_tso,
	.self_test_count = bnx2x_self_test_count,
	.self_test = bnx2x_self_test,
	.get_strings = bnx2x_get_strings,
	.phys_id = bnx2x_phys_id,
	.get_stats_count = bnx2x_get_stats_count,
	.get_ethtool_stats = bnx2x_get_ethtool_stats
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev,
				      bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;
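		/* 3 is the D3hot encoding of the
		 * PCI_PM_CTRL_STATE_MASK field */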

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 (dev->mc_count > BNX2X_MAX_MULTICAST))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		int i, old, offset;
		struct dev_mc_list *mclist;
		struct mac_configuration_cmd *config =
					bnx2x_sp(bp, mcast_config);

		for (i = 0, mclist = dev->mc_list;
		     mclist && (i < dev->mc_count);
		     i++, mclist = mclist->next) {

			config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
			config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
			config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
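			/* Illustrative example (not from the original
			 * source): for 01:00:5e:00:00:fb the three
			 * swab16() calls above yield msb 0x0100,
			 * middle 0x5e00 and lsb 0x00fb, i.e. the address
			 * as big-endian 16-bit words for the CAM entry.
			 */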
			config->config_table[i].cam_entry.flags =
					cpu_to_le16(bp->port);
			config->config_table[i].target_table_entry.flags = 0;
			config->config_table[i].target_table_entry.
								client_id = 0;
			config->config_table[i].target_table_entry.
								vlan_id = 0;

			DP(NETIF_MSG_IFUP,
			   "setting MCAST[%d] (%04x:%04x:%04x)\n",
			   i, config->config_table[i].cam_entry.msb_mac_addr,
			   config->config_table[i].cam_entry.middle_mac_addr,
			   config->config_table[i].cam_entry.lsb_mac_addr);
		}
		old = config->hdr.length_6b;
		if (old > i) {
			for (; i < old; i++) {
				if (CAM_IS_INVALID(config->config_table[i])) {
					i--; /* already invalidated */
					break;
				}
				/* invalidate */
				CAM_INVALIDATE(config->config_table[i]);
			}
		}

		if (CHIP_REV_IS_SLOW(bp))
			offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
		else
			offset = BNX2X_MAX_MULTICAST*(1 + bp->port);

		config->hdr.length_6b = i;
		config->hdr.offset = offset;
		config->hdr.reserved0 = 0;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto out_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
out_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, fp->index, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}
9110
9111/* Called with netif_tx_lock.
9112 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9113 * netif_wake_queue().
9114 */
9115static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9116{
9117 struct bnx2x *bp = netdev_priv(dev);
9118 struct bnx2x_fastpath *fp;
9119 struct sw_tx_bd *tx_buf;
9120 struct eth_tx_bd *tx_bd;
9121 struct eth_tx_parse_bd *pbd = NULL;
9122 u16 pkt_prod, bd_prod;
9123 int nbd, fp_index = 0;
9124 dma_addr_t mapping;
9125
9126#ifdef BNX2X_STOP_ON_ERROR
9127 if (unlikely(bp->panic))
9128 return NETDEV_TX_BUSY;
9129#endif
9130
9131 fp_index = smp_processor_id() % (bp->num_queues);
9132
9133 fp = &bp->fp[fp_index];
9134 if (unlikely(bnx2x_tx_avail(bp->fp) <
9135 (skb_shinfo(skb)->nr_frags + 3))) {
9136 bp->slowpath->eth_stats.driver_xoff++,
9137 netif_stop_queue(dev);
9138 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9139 return NETDEV_TX_BUSY;
9140 }
9141
9142 /*
9143 This is a bit ugly. First we use one BD which we mark as start,
9144 then for TSO or xsum we have a parsing info BD,
9145 and only then we have the rest of the TSO bds.
9146 (don't forget to mark the last one as last,
9147 and to unmap only AFTER you write to the BD ...)
9148 I would like to thank DovH for this mess.
9149 */
9150
9151 pkt_prod = fp->tx_pkt_prod++;
9152 bd_prod = fp->tx_bd_prod;
9153 bd_prod = TX_BD(bd_prod);
9154
9155 /* get a tx_buff and first bd */
9156 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9157 tx_bd = &fp->tx_desc_ring[bd_prod];
9158
9159 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9160 tx_bd->general_data = (UNICAST_ADDRESS <<
9161 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9162 tx_bd->general_data |= 1; /* header nbd */
9163
c14423fe 9164 /* remember the first bd of the packet */
a2fbb9ea
ET
9165 tx_buf->first_bd = bd_prod;
9166
9167 DP(NETIF_MSG_TX_QUEUED,
9168 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9169 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9170
9171 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9172 struct iphdr *iph = ip_hdr(skb);
9173 u8 len;
9174
9175 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
9176
9177 /* turn on parsing and get a bd */
9178 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9179 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9180 len = ((u8 *)iph - (u8 *)skb->data) / 2;
9181
9182 /* for now NS flag is not used in Linux */
9183 pbd->global_data = (len |
9184 ((skb->protocol == ETH_P_8021Q) <<
9185 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9186 pbd->ip_hlen = ip_hdrlen(skb) / 2;
9187 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
9188 if (iph->protocol == IPPROTO_TCP) {
9189 struct tcphdr *th = tcp_hdr(skb);
9190
9191 tx_bd->bd_flags.as_bitfield |=
9192 ETH_TX_BD_FLAGS_TCP_CSUM;
9193 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
9194 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
9195 pbd->tcp_pseudo_csum = swab16(th->check);
9196
9197 } else if (iph->protocol == IPPROTO_UDP) {
9198 struct udphdr *uh = udp_hdr(skb);
9199
9200 tx_bd->bd_flags.as_bitfield |=
9201 ETH_TX_BD_FLAGS_TCP_CSUM;
9202 pbd->total_hlen += cpu_to_le16(4);
9203 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9204 pbd->cs_offset = 5; /* 10 >> 1 */
9205 pbd->tcp_pseudo_csum = 0;
9206 /* HW bug: we need to subtract 10 bytes before the
9207 * UDP header from the csum
9208 */
9209 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
9210 csum_partial(((u8 *)(uh)-10), 10, 0)));
9211 }
9212 }
9213
9214 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9215 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9216 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9217 } else {
9218 tx_bd->vlan = cpu_to_le16(pkt_prod);
9219 }
9220
9221 mapping = pci_map_single(bp->pdev, skb->data,
9222 skb->len, PCI_DMA_TODEVICE);
9223
9224 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9225 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9226 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9227 tx_bd->nbd = cpu_to_le16(nbd);
9228 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9229
9230 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9231 " nbytes %d flags %x vlan %u\n",
9232 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
9233 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
9234
9235 if (skb_shinfo(skb)->gso_size &&
9236 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
9237 int hlen = 2 * le32_to_cpu(pbd->total_hlen);
9238
9239 DP(NETIF_MSG_TX_QUEUED,
9240 "TSO packet len %d hlen %d total len %d tso size %d\n",
9241 skb->len, hlen, skb_headlen(skb),
9242 skb_shinfo(skb)->gso_size);
9243
9244 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9245
9246 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
9247			/* we split the first bd into header and data bds
9248			 * to ease the pain of our fellow microcode engineers;
9249			 * we use one mapping for both bds.
9250			 * So far this has only been observed to happen
9251			 * in Other Operating Systems(TM)
9252			 */
9253
9254 /* first fix first bd */
9255 nbd++;
9256 tx_bd->nbd = cpu_to_le16(nbd);
9257 tx_bd->nbytes = cpu_to_le16(hlen);
9258
9259 /* we only print this as an error
9260 * because we don't think this will ever happen.
9261 */
9262 BNX2X_ERR("TSO split header size is %d (%x:%x)"
9263 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
9264 tx_bd->addr_lo, tx_bd->nbd);
9265
9266 /* now get a new data bd
9267 * (after the pbd) and fill it */
9268 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9269 tx_bd = &fp->tx_desc_ring[bd_prod];
9270
9271 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9272 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
9273 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
9274 tx_bd->vlan = cpu_to_le16(pkt_prod);
9275			/* this marks the bd
9276			 * as one that has no individual mapping;
9277			 * the FW ignores this flag in a bd not marked start
9278			 */
9279 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9280 DP(NETIF_MSG_TX_QUEUED,
9281 "TSO split data size is %d (%x:%x)\n",
9282 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
9283 }
9284
9285 if (!pbd) {
9286 /* supposed to be unreached
9287 * (and therefore not handled properly...)
9288 */
9289 BNX2X_ERR("LSO with no PBD\n");
9290 BUG();
9291 }
9292
9293 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9294 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9295 pbd->ip_id = swab16(ip_hdr(skb)->id);
9296 pbd->tcp_pseudo_csum =
9297 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9298 ip_hdr(skb)->daddr,
9299 0, IPPROTO_TCP, 0));
9300 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
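		/* the pseudo-header checksum above is seeded with a zero
		 * TCP length (csum_tcpudp_magic(..., 0, IPPROTO_TCP, 0));
		 * PSEUDO_CS_WITHOUT_LEN tells the chip to fold in the real
		 * per-segment length when it cuts the segments
		 */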
9301 }
9302
9303 {
9304 int i;
9305
9306 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9307 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9308
9309 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9310 tx_bd = &fp->tx_desc_ring[bd_prod];
9311
9312 mapping = pci_map_page(bp->pdev, frag->page,
9313 frag->page_offset,
9314 frag->size, PCI_DMA_TODEVICE);
9315
9316 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9317 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9318 tx_bd->nbytes = cpu_to_le16(frag->size);
9319 tx_bd->vlan = cpu_to_le16(pkt_prod);
9320 tx_bd->bd_flags.as_bitfield = 0;
9321 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
9322 " addr (%x:%x) nbytes %d flags %x\n",
9323 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9324 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
9325 } /* for */
9326 }
9327
9328 /* now at last mark the bd as the last bd */
9329 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9330
9331 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9332 tx_bd, tx_bd->bd_flags.as_bitfield);
9333
9334 tx_buf->skb = skb;
9335
9336 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9337
9338 /* now send a tx doorbell, counting the next bd
9339 * if the packet contains or ends with it
9340 */
9341 if (TX_BD_POFF(bd_prod) < nbd)
9342 nbd++;
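	/* (the last entry of each BD page is a next-page pointer, not a
	 * data BD, so a packet that wraps past a page boundary spans one
	 * extra chain element; that is what the test above detects)
	 */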
9343
9344 if (pbd)
9345 DP(NETIF_MSG_TX_QUEUED,
9346 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9347 " tcp_flags %x xsum %x seq %u hlen %u\n",
9348 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9349 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9350 pbd->tcp_send_seq, pbd->total_hlen);
9351
9352 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
9353
9354 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
9355 mb(); /* FW restriction: must not reorder writing nbd and packets */
9356 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
9357 DOORBELL(bp, fp_index, 0);
9358
9359 mmiowb();
9360
9361 fp->tx_bd_prod = bd_prod;
9362 dev->trans_start = jiffies;
9363
9364 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9365 netif_stop_queue(dev);
9366 bp->slowpath->eth_stats.driver_xoff++;
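		/* re-check after stopping: the completion path may have
		 * freed BDs between the availability test and the stop;
		 * wake immediately so the queue cannot stall forever
		 */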
9367 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9368 netif_wake_queue(dev);
9369 }
9370 fp->tx_pkt++;
9371
9372 return NETDEV_TX_OK;
9373}
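/* For reference, the BD chain built above for a TSO packet with two
 * page fragments looks roughly like this (sketch; the split data BD
 * only appears when the linear part extends past the headers):
 *
 *   [start BD: headers] [parsing BD] [data BD: rest of linear part]
 *   [frag BD 0] [frag BD 1 + END_BD]
 */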
9374
9375static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
9376{
9377 return &dev->stats;
9378}
9379
9380/* Called with rtnl_lock */
9381static int bnx2x_open(struct net_device *dev)
9382{
9383 struct bnx2x *bp = netdev_priv(dev);
9384
9385 bnx2x_set_power_state(bp, PCI_D0);
9386
9387 return bnx2x_nic_load(bp, 1);
9388}
9389
9390/* Called with rtnl_lock */
9391static int bnx2x_close(struct net_device *dev)
9392{
9393 int rc;
9394 struct bnx2x *bp = netdev_priv(dev);
9395
9396 /* Unload the driver, release IRQs */
9397 rc = bnx2x_nic_unload(bp, 1);
9398 if (rc) {
9399 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
9400 return rc;
9401 }
9402 bnx2x_set_power_state(bp, PCI_D3hot);
9403
9404 return 0;
9405}
9406
9407/* Called with rtnl_lock */
9408static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9409{
9410 struct sockaddr *addr = p;
9411 struct bnx2x *bp = netdev_priv(dev);
9412
9413 if (!is_valid_ether_addr(addr->sa_data))
9414 return -EINVAL;
9415
9416 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9417 if (netif_running(dev))
9418 bnx2x_set_mac_addr(bp);
9419
9420 return 0;
9421}
9422
9423/* Called with rtnl_lock */
9424static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9425{
9426 struct mii_ioctl_data *data = if_mii(ifr);
9427 struct bnx2x *bp = netdev_priv(dev);
9428 int err;
9429
9430 switch (cmd) {
9431 case SIOCGMIIPHY:
9432 data->phy_id = bp->phy_addr;
9433
9434		/* fallthrough */
9435 case SIOCGMIIREG: {
9436 u32 mii_regval;
9437
9438 spin_lock_bh(&bp->phy_lock);
9439 if (bp->state == BNX2X_STATE_OPEN) {
9440 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
9441 &mii_regval);
9442
9443 data->val_out = mii_regval;
9444 } else {
9445 err = -EAGAIN;
9446 }
9447 spin_unlock_bh(&bp->phy_lock);
9448 return err;
9449 }
9450
9451 case SIOCSMIIREG:
9452 if (!capable(CAP_NET_ADMIN))
9453 return -EPERM;
9454
9455 spin_lock_bh(&bp->phy_lock);
9456 if (bp->state == BNX2X_STATE_OPEN) {
9457 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
9458 data->val_in);
9459 } else {
9460 err = -EAGAIN;
9461 }
9462 spin_unlock_bh(&bp->phy_lock);
9463 return err;
9464
9465 default:
9466 /* do nothing */
9467 break;
9468 }
9469
9470 return -EOPNOTSUPP;
9471}
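/* A minimal userspace sketch of driving the MII ioctls handled above
 * (hypothetical standalone program, not part of this driver; error
 * handling trimmed).  SIOCGMIIPHY fills in the PHY id, SIOCGMIIREG
 * then reads a register through it:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *
 *	static int read_phy_reg(int sock, const char *ifname, int reg)
 *	{
 *		struct ifreq ifr;
 *		struct mii_ioctl_data *mii =
 *			(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)
 *			return -1;
 *		mii->reg_num = reg;
 *		if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
 *			return -1;
 *		return mii->val_out;
 *	}
 *
 * where sock is any AF_INET datagram socket, e.g.
 * socket(AF_INET, SOCK_DGRAM, 0).
 */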
9472
9473/* Called with rtnl_lock */
9474static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9475{
9476 struct bnx2x *bp = netdev_priv(dev);
9477
9478 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9479 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9480 return -EINVAL;
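	/* the lower bound checks the resulting frame size, hence the
	 * ETH_HLEN added to new_mtu; the upper bound caps the MTU at the
	 * hardware's jumbo packet limit
	 */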
9481
9482 /* This does not race with packet allocation
9483	 * because the actual alloc size is
9484 * only updated as part of load
9485 */
9486 dev->mtu = new_mtu;
9487
9488 if (netif_running(dev)) {
9489 bnx2x_nic_unload(bp, 0);
9490 bnx2x_nic_load(bp, 0);
9491 }
9492 return 0;
9493}
9494
9495static void bnx2x_tx_timeout(struct net_device *dev)
9496{
9497 struct bnx2x *bp = netdev_priv(dev);
9498
9499#ifdef BNX2X_STOP_ON_ERROR
9500 if (!bp->panic)
9501 bnx2x_panic();
9502#endif
9503	/* This allows the netif to be shut down gracefully before resetting */
9504 schedule_work(&bp->reset_task);
9505}
9506
9507#ifdef BCM_VLAN
9508/* Called with rtnl_lock */
9509static void bnx2x_vlan_rx_register(struct net_device *dev,
9510 struct vlan_group *vlgrp)
9511{
9512 struct bnx2x *bp = netdev_priv(dev);
9513
9514 bp->vlgrp = vlgrp;
9515 if (netif_running(dev))
9516		bnx2x_set_client_config(bp);
9517}
9518#endif
9519
9520#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9521static void poll_bnx2x(struct net_device *dev)
9522{
9523 struct bnx2x *bp = netdev_priv(dev);
9524
9525 disable_irq(bp->pdev->irq);
9526 bnx2x_interrupt(bp->pdev->irq, dev);
9527 enable_irq(bp->pdev->irq);
9528}
9529#endif
9530
9531static void bnx2x_reset_task(struct work_struct *work)
9532{
9533 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
9534
9535#ifdef BNX2X_STOP_ON_ERROR
9536 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
9537 " so reset not done to allow debug dump,\n"
9538 KERN_ERR " you will need to reboot when done\n");
9539 return;
9540#endif
9541
9542 if (!netif_running(bp->dev))
9543 return;
9544
9545 bp->in_reset_task = 1;
9546
9547 bnx2x_netif_stop(bp);
9548
9549 bnx2x_nic_unload(bp, 0);
9550 bnx2x_nic_load(bp, 0);
9551
9552 bp->in_reset_task = 0;
9553}
9554
9555static int __devinit bnx2x_init_board(struct pci_dev *pdev,
9556 struct net_device *dev)
9557{
9558 struct bnx2x *bp;
9559 int rc;
9560
9561 SET_NETDEV_DEV(dev, &pdev->dev);
9562 bp = netdev_priv(dev);
9563
9564 bp->flags = 0;
9565 bp->port = PCI_FUNC(pdev->devfn);
9566
9567 rc = pci_enable_device(pdev);
9568 if (rc) {
9569 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9570 goto err_out;
9571 }
9572
9573 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9574 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9575 " aborting\n");
9576 rc = -ENODEV;
9577 goto err_out_disable;
9578 }
9579
9580 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9581 printk(KERN_ERR PFX "Cannot find second PCI device"
9582 " base address, aborting\n");
9583 rc = -ENODEV;
9584 goto err_out_disable;
9585 }
9586
9587 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9588 if (rc) {
9589 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9590 " aborting\n");
9591 goto err_out_disable;
9592 }
9593
9594 pci_set_master(pdev);
9595
9596 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9597 if (bp->pm_cap == 0) {
9598 printk(KERN_ERR PFX "Cannot find power management"
9599 " capability, aborting\n");
9600 rc = -EIO;
9601 goto err_out_release;
9602 }
9603
9604 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9605 if (bp->pcie_cap == 0) {
9606 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9607 " aborting\n");
9608 rc = -EIO;
9609 goto err_out_release;
9610 }
9611
9612 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9613 bp->flags |= USING_DAC_FLAG;
9614 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9615 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9616 " failed, aborting\n");
9617 rc = -EIO;
9618 goto err_out_release;
9619 }
9620
9621 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9622 printk(KERN_ERR PFX "System does not support DMA,"
9623 " aborting\n");
9624 rc = -EIO;
9625 goto err_out_release;
9626 }
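	/* DMA mask policy above: try 64-bit first, flagging DAC so highmem
	 * skbs need no bounce and requiring a matching coherent mask; fall
	 * back to 32-bit, and fail the probe if even that is unsupported
	 */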
9627
9628 bp->dev = dev;
9629 bp->pdev = pdev;
9630
9631 spin_lock_init(&bp->phy_lock);
9632
9633 bp->in_reset_task = 0;
9634
9635 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
9636 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
9637
9638	dev->base_addr = pci_resource_start(pdev, 0);
9639
9640 dev->irq = pdev->irq;
9641
9642 bp->regview = ioremap_nocache(dev->base_addr,
9643 pci_resource_len(pdev, 0));
9644 if (!bp->regview) {
9645 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9646 rc = -ENOMEM;
9647 goto err_out_release;
9648 }
9649
9650 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
9651 pci_resource_len(pdev, 2));
9652 if (!bp->doorbells) {
9653 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9654 rc = -ENOMEM;
9655 goto err_out_unmap;
9656 }
9657
9658 bnx2x_set_power_state(bp, PCI_D0);
9659
9660 bnx2x_get_hwinfo(bp);
9661
9662 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
9663		printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
9664 " will only init first device\n");
9665 onefunc = 1;
9666 nomcp = 1;
9667 }
9668
9669 if (nomcp) {
9670 printk(KERN_ERR PFX "MCP disabled, will only"
9671 " init first device\n");
9672 onefunc = 1;
9673 }
9674
9675 if (onefunc && bp->port) {
9676 printk(KERN_ERR PFX "Second device disabled, exiting\n");
9677 rc = -ENODEV;
9678 goto err_out_unmap;
9679 }
9680
9681 bp->tx_ring_size = MAX_TX_AVAIL;
9682 bp->rx_ring_size = MAX_RX_AVAIL;
9683
9684 bp->rx_csum = 1;
9685
9686 bp->rx_offset = 0;
9687
9688 bp->tx_quick_cons_trip_int = 0xff;
9689 bp->tx_quick_cons_trip = 0xff;
9690 bp->tx_ticks_int = 50;
9691 bp->tx_ticks = 50;
9692
9693 bp->rx_quick_cons_trip_int = 0xff;
9694 bp->rx_quick_cons_trip = 0xff;
9695 bp->rx_ticks_int = 25;
9696 bp->rx_ticks = 25;
9697
9698 bp->stats_ticks = 1000000 & 0xffff00;
9699
9700 bp->timer_interval = HZ;
9701 bp->current_interval = (poll ? poll : HZ);
9702
9703 init_timer(&bp->timer);
9704 bp->timer.expires = jiffies + bp->current_interval;
9705 bp->timer.data = (unsigned long) bp;
9706 bp->timer.function = bnx2x_timer;
9707
9708 return 0;
9709
9710err_out_unmap:
9711 if (bp->regview) {
9712 iounmap(bp->regview);
9713 bp->regview = NULL;
9714 }
9715
9716 if (bp->doorbells) {
9717 iounmap(bp->doorbells);
9718 bp->doorbells = NULL;
9719 }
9720
9721err_out_release:
9722 pci_release_regions(pdev);
9723
9724err_out_disable:
9725 pci_disable_device(pdev);
9726 pci_set_drvdata(pdev, NULL);
9727
9728err_out:
9729 return rc;
9730}
9731
9732static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9733{
9734 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9735
9736 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9737 return val;
9738}
9739
9740 /* return value: 1 = 2.5GHz, 2 = 5GHz */
9741static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9742{
9743 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9744
9745 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9746 return val;
9747}
9748
9749static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9750 const struct pci_device_id *ent)
9751{
9752 static int version_printed;
9753 struct net_device *dev = NULL;
9754 struct bnx2x *bp;
9755	int rc;
9756	int port = PCI_FUNC(pdev->devfn);
9757	DECLARE_MAC_BUF(mac);
9758
9759 if (version_printed++ == 0)
9760 printk(KERN_INFO "%s", version);
9761
9762 /* dev zeroed in init_etherdev */
9763 dev = alloc_etherdev(sizeof(*bp));
9764 if (!dev)
9765 return -ENOMEM;
9766
9767 netif_carrier_off(dev);
9768
9769 bp = netdev_priv(dev);
9770 bp->msglevel = debug;
9771
9772 if (port && onefunc) {
9773		printk(KERN_ERR PFX "Second function disabled, exiting\n");
9774		free_netdev(dev);
9775 return 0;
9776 }
9777
9778 rc = bnx2x_init_board(pdev, dev);
9779 if (rc < 0) {
9780 free_netdev(dev);
9781 return rc;
9782 }
9783
9784 dev->hard_start_xmit = bnx2x_start_xmit;
9785 dev->watchdog_timeo = TX_TIMEOUT;
9786
9787 dev->get_stats = bnx2x_get_stats;
9788 dev->ethtool_ops = &bnx2x_ethtool_ops;
9789 dev->open = bnx2x_open;
9790 dev->stop = bnx2x_close;
9791 dev->set_multicast_list = bnx2x_set_rx_mode;
9792 dev->set_mac_address = bnx2x_change_mac_addr;
9793 dev->do_ioctl = bnx2x_ioctl;
9794 dev->change_mtu = bnx2x_change_mtu;
9795 dev->tx_timeout = bnx2x_tx_timeout;
9796#ifdef BCM_VLAN
9797 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9798#endif
9799#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9800 dev->poll_controller = poll_bnx2x;
9801#endif
9802 dev->features |= NETIF_F_SG;
9803 if (bp->flags & USING_DAC_FLAG)
9804 dev->features |= NETIF_F_HIGHDMA;
9805 dev->features |= NETIF_F_IP_CSUM;
9806#ifdef BCM_VLAN
9807 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9808#endif
9809 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
9810
9811 rc = register_netdev(dev);
9812 if (rc) {
9813		dev_err(&pdev->dev, "Cannot register net device\n");
9814 if (bp->regview)
9815 iounmap(bp->regview);
9816 if (bp->doorbells)
9817 iounmap(bp->doorbells);
9818 pci_release_regions(pdev);
9819 pci_disable_device(pdev);
9820 pci_set_drvdata(pdev, NULL);
9821 free_netdev(dev);
9822 return rc;
9823 }
9824
9825 pci_set_drvdata(pdev, dev);
9826
9827 bp->name = board_info[ent->driver_data].name;
9828 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
9829 " IRQ %d, ", dev->name, bp->name,
9830 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
9831 ((CHIP_ID(bp) & 0x0ff0) >> 4),
9832 bnx2x_get_pcie_width(bp),
9833 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
9834 dev->base_addr, bp->pdev->irq);
9835 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
9836 return 0;
9837}
9838
9839static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9840{
9841 struct net_device *dev = pci_get_drvdata(pdev);
9842 struct bnx2x *bp = netdev_priv(dev);
9843
9844 flush_scheduled_work();
9845 /*tasklet_kill(&bp->sp_task);*/
9846 unregister_netdev(dev);
9847
9848 if (bp->regview)
9849 iounmap(bp->regview);
9850
9851 if (bp->doorbells)
9852 iounmap(bp->doorbells);
9853
9854 free_netdev(dev);
9855 pci_release_regions(pdev);
9856 pci_disable_device(pdev);
9857 pci_set_drvdata(pdev, NULL);
9858}
9859
9860static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9861{
9862 struct net_device *dev = pci_get_drvdata(pdev);
9863 struct bnx2x *bp = netdev_priv(dev);
9864 int rc;
9865
9866 if (!netif_running(dev))
9867 return 0;
9868
9869 rc = bnx2x_nic_unload(bp, 0);
9870	if (rc)
9871		return rc;
9872
9873 netif_device_detach(dev);
9874 pci_save_state(pdev);
9875
9876 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9877 return 0;
9878}
9879
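/* resume mirrors suspend in reverse: restore PCI config space, raise
 * the power state, reattach the net device, then reload the NIC
 */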
9880static int bnx2x_resume(struct pci_dev *pdev)
9881{
9882 struct net_device *dev = pci_get_drvdata(pdev);
9883 struct bnx2x *bp = netdev_priv(dev);
9884 int rc;
9885
9886 if (!netif_running(dev))
9887 return 0;
9888
9889 pci_restore_state(pdev);
9890
9891 bnx2x_set_power_state(bp, PCI_D0);
9892 netif_device_attach(dev);
9893
9894 rc = bnx2x_nic_load(bp, 0);
9895 if (rc)
9896 return rc;
9897
9898 return 0;
9899}
9900
9901static struct pci_driver bnx2x_pci_driver = {
9902 .name = DRV_MODULE_NAME,
9903 .id_table = bnx2x_pci_tbl,
9904 .probe = bnx2x_init_one,
9905 .remove = __devexit_p(bnx2x_remove_one),
9906 .suspend = bnx2x_suspend,
9907 .resume = bnx2x_resume,
9908};
9909
9910static int __init bnx2x_init(void)
9911{
9912 return pci_register_driver(&bnx2x_pci_driver);
9913}
9914
9915static void __exit bnx2x_cleanup(void)
9916{
9917 pci_unregister_driver(&bnx2x_pci_driver);
9918}
9919
9920module_init(bnx2x_init);
9921module_exit(bnx2x_cleanup);
9922