/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h> /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.42.4"
#define DRV_MODULE_RELDATE	"2008/4/9"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int use_inta;
static int poll;
static int debug;
static int nomcp;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
module_param(nomcp, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
MODULE_PARM_DESC(nomcp, "ignore management CPU");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
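
/* The two helpers above tunnel register accesses through the PCI config
 * space GRC window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA) rather than the
 * memory-mapped BAR, which is why they are usable before the DMAE engine
 * and BAR-based accessors are ready.  A minimal usage sketch (the register
 * name below is only an illustrative placeholder, not necessarily one the
 * driver reads this way):
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, MISC_REG_CHIP_NUM);
 *	bnx2x_reg_wr_ind(bp, MISC_REG_DRIVER_CONTROL_1, val);
 *
 * Restoring PCICFG_VENDOR_ID_OFFSET afterwards parks the window on a
 * harmless read-only address.
 */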

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
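
/* A typical caller hands bnx2x_write_dmae() a DMA-coherent buffer and a
 * GRC destination; the DMAE engine copies len32 dwords and then signals
 * completion by writing DMAE_COMP_VAL to the wb_comp slowpath word polled
 * above.  Illustrative sketch only (reg_offset stands in for whatever
 * chip address the caller is targeting):
 *
 *	u32 *wb = bnx2x_sp(bp, wb_data[0]);
 *	wb[0] = hi;
 *	wb[1] = lo;
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg_offset, 2);
 */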

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
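
/* bnx2x_wb_wr() (and its read twin below, compiled only under USE_WB_RD)
 * exists because some chip registers are 64 bits wide and are accessed as
 * a hi/lo dword pair through the DMAE engine, rather than as two
 * independent 32-bit REG_WR()/REG_RD() accesses.
 */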

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_comp_prod(%x) rx_comp_cons(%x)"
			  " *rx_cons_sb(%x)\n",
			  fp->rx_comp_prod, fp->rx_comp_cons,
			  le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data(%x,%x)\n",
			  fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
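
/* One 32-bit IGU write acks a status block: it packs the status block ID,
 * the storm (CSTORM or USTORM), the last index the driver has seen, and
 * the interrupt mode (IGU_INT_ENABLE vs IGU_INT_DISABLE).  The MSI-X
 * fastpath ISR below acks with IGU_INT_DISABLE before scheduling NAPI,
 * leaving the poll path to re-enable the interrupt once the ring work
 * is done.
 */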

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
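
/* The return value is a small bitmask: bit 0 set means the CSTORM index
 * moved, bit 1 set means the USTORM index moved, and 0 means the status
 * block is unchanged and there is nothing new to service.
 */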

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	BUG_TRAP(skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
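
/* A transmitted packet thus occupies a chain of BDs: the first (mapped)
 * BD, an optional parse BD when any checksum/LSO flag is set, an optional
 * TSO split-header BD, and one mapped BD per page fragment.  nbd counts
 * them all, which is why the unmap loop above walks NEXT_TX_IDX() instead
 * of assuming the BDs are contiguous across the "next page" entries.
 */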

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	BUG_TRAP(used >= 0);
	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
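
/* Worked example (illustrative numbers): with tx_ring_size = 4096,
 * prod = 100, cons = 80, and NUM_TX_RINGS = 16 "next page" entries held
 * in reserve, used = (100 - 80) + 16 = 36, so 4096 - 36 = 4060 BDs remain
 * available.  SUB_S16() keeps the subtraction correct when the 16-bit
 * producer has wrapped around past the consumer.
 */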

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					/* TBD count this as a drop? */
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)),
	       sw_comp_prod);


	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
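
/* Callers are expected to bracket shared-hardware accesses with a
 * lock/unlock pair on the matching resource bit, as the GPIO and SPIO
 * helpers below do.  A minimal sketch:
 *
 *	if (!bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... touch MISC_REG_GPIO ...
 *		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */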

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
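
/* Illustrative call (the pin number is a placeholder; which GPIO drives
 * what is board specific):
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH);
 *
 * Each pin is controlled through three write-one positions (SET, CLR,
 * FLOAT) rather than a single read-modify-write bit, which is why the
 * switch above sets exactly one of them per mode.
 */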

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc;

	/* Initialize link parameters structure variables */
	bp->link_params.mtu = bp->dev->mtu;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up)
		bnx2x_link_report(bp);

	bnx2x_calc_fc_adv(bp);

	return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	bnx2x_calc_fc_adv(bp);
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
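
/* Example of the scaling above (illustrative values): a MIN_BW field of
 * 25 becomes vn_min_rate = 25 * 100 = 2500; with four visible VNs
 * configured as 25/25/0/50, the zero entry is bumped to DEF_MIN_RATE and
 * wsum = 2500 + 2500 + DEF_MIN_RATE + 5000.  Only when every VN's min
 * rate is zero does the function return 0 and fairness shut off entirely.
 */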
1613
1614static void bnx2x_init_port_minmax(struct bnx2x *bp,
1615 int en_fness,
1616 u16 port_rate,
1617 struct cmng_struct_per_port *m_cmng_port)
1618{
1619 u32 r_param = port_rate / 8;
1620 int port = BP_PORT(bp);
1621 int i;
1622
1623 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
1624
1625 /* Enable minmax only if we are in e1hmf mode */
1626 if (IS_E1HMF(bp)) {
1627 u32 fair_periodic_timeout_usec;
1628 u32 t_fair;
1629
1630 /* Enable rate shaping and fairness */
1631 m_cmng_port->flags.cmng_vn_enable = 1;
1632 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
1633 m_cmng_port->flags.rate_shaping_enable = 1;
1634
1635 if (!en_fness)
1636 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1637 " fairness will be disabled\n");
1638
1639 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1640 m_cmng_port->rs_vars.rs_periodic_timeout =
1641 RS_PERIODIC_TIMEOUT_USEC / 4;
1642
1643 /* this is the threshold below which no timer arming will occur
1644 1.25 coefficient is for the threshold to be a little bigger
1645 than the real time, to compensate for timer in-accuracy */
1646 m_cmng_port->rs_vars.rs_threshold =
1647 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1648
1649 /* resolution of fairness timer */
1650 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1651 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1652 t_fair = T_FAIR_COEF / port_rate;
1653
1654 /* this is the threshold below which we won't arm
1655 the timer anymore */
1656 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
1657
1658 /* we multiply by 1e3/8 to get bytes/msec.
1659 We don't want the credits to pass a credit
1660 of the T_FAIR*FAIR_MEM (algorithm resolution) */
1661 m_cmng_port->fair_vars.upper_bound =
1662 r_param * t_fair * FAIR_MEM;
1663 /* since each tick is 4 usec */
1664 m_cmng_port->fair_vars.fairness_timeout =
1665 fair_periodic_timeout_usec / 4;
1666
1667 } else {
1668 /* Disable rate shaping and fairness */
1669 m_cmng_port->flags.cmng_vn_enable = 0;
1670 m_cmng_port->flags.fairness_enable = 0;
1671 m_cmng_port->flags.rate_shaping_enable = 0;
1672
1673 DP(NETIF_MSG_IFUP,
1674 "Single function mode minmax will be disabled\n");
1675 }
1676
1677 /* Store it to internal memory */
1678 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1679 REG_WR(bp, BAR_XSTRORM_INTMEM +
1680 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
1681 ((u32 *)(m_cmng_port))[i]);
1682}
1683
1684static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
1685 u32 wsum, u16 port_rate,
1686 struct cmng_struct_per_port *m_cmng_port)
1687{
1688 struct rate_shaping_vars_per_vn m_rs_vn;
1689 struct fairness_vars_per_vn m_fair_vn;
1690 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1691 u16 vn_min_rate, vn_max_rate;
1692 int i;
1693
1694 /* If function is hidden - set min and max to zeroes */
1695 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1696 vn_min_rate = 0;
1697 vn_max_rate = 0;
1698
1699 } else {
1700 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1701 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1702 /* If FAIRNESS is enabled (not all min rates are zeroes) and
1703 if current min rate is zero - set it to 1.
1704 This is a requirment of the algorithm. */
1705 if ((vn_min_rate == 0) && wsum)
1706 vn_min_rate = DEF_MIN_RATE;
1707 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1708 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1709 }
1710
1711 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
1712 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
1713
1714 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1715 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1716
1717 /* global vn counter - maximal Mbps for this vn */
1718 m_rs_vn.vn_counter.rate = vn_max_rate;
1719
1720 /* quota - number of bytes transmitted in this period */
1721 m_rs_vn.vn_counter.quota =
1722 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1723
1724#ifdef BNX2X_PER_PROT_QOS
1725 /* per protocol counter */
1726 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
1727 /* maximal Mbps for this protocol */
1728 m_rs_vn.protocol_counters[protocol].rate =
1729 protocol_max_rate[protocol];
1730 /* the quota in each timer period -
1731 number of bytes transmitted in this period */
1732 m_rs_vn.protocol_counters[protocol].quota =
1733 (u32)(rs_periodic_timeout_usec *
1734 ((double)m_rs_vn.
1735 protocol_counters[protocol].rate/8));
1736 }
1737#endif
1738
1739 if (wsum) {
1740 /* credit for each period of the fairness algorithm:
1741 number of bytes in T_FAIR (the vn share the port rate).
1742 wsum should not be larger than 10000, thus
1743 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
1744 m_fair_vn.vn_credit_delta =
1745 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
1746 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
1747 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
1748 m_fair_vn.vn_credit_delta);
1749 }
1750
1751#ifdef BNX2X_PER_PROT_QOS
1752 do {
1753 u32 protocolWeightSum = 0;
1754
1755 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
1756 protocolWeightSum +=
1757 drvInit.protocol_min_rate[protocol];
1758 /* per protocol counter -
1759 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
1760 if (protocolWeightSum > 0) {
1761 for (protocol = 0;
1762 protocol < NUM_OF_PROTOCOLS; protocol++)
1763 /* credit for each period of the
1764 fairness algorithm - number of bytes in
1765 T_FAIR (the protocol share the vn rate) */
1766 m_fair_vn.protocol_credit_delta[protocol] =
1767 (u32)((vn_min_rate / 8) * t_fair *
1768 protocol_min_rate / protocolWeightSum);
1769 }
1770 } while (0);
1771#endif
1772
1773 /* Store it to internal memory */
1774 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1775 REG_WR(bp, BAR_XSTRORM_INTMEM +
1776 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1777 ((u32 *)(&m_rs_vn))[i]);
1778
1779 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1780 REG_WR(bp, BAR_XSTRORM_INTMEM +
1781 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1782 ((u32 *)(&m_fair_vn))[i]);
1783}
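/*
 * Editorial sketch (not part of the driver): the fairness credit
 * computed above, isolated as a pure helper.  The helper name and
 * parameters are illustrative; T_FAIR_COEF and the u64/max() handling
 * mirror the code above.
 */
static inline u32 example_vn_credit_delta(u32 vn_min_rate, u32 wsum,
					  u32 fair_threshold)
{
	/* bytes credited to this VN per T_FAIR period; wsum <= 10000
	   keeps T_FAIR_COEF / (8 * wsum) above zero */
	return (u32)max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			(u64)(fair_threshold * 2));
}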
1784
1785/* This function is called upon link interrupt */
1786static void bnx2x_link_attn(struct bnx2x *bp)
1787{
1788 int vn;
1789
1790 bnx2x_phy_hw_lock(bp);
1791 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1792 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1793
1794 /* indicate link status */
1795 bnx2x_link_report(bp);
1796
1797 if (IS_E1HMF(bp)) {
1798 int func;
1799
1800 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1801 if (vn == BP_E1HVN(bp))
1802 continue;
1803
1804 func = ((vn << 1) | BP_PORT(bp));
1805
1806 /* Set the attention towards other drivers
1807 on the same port */
1808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1809 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1810 }
1811 }
1812
1813 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
1814 struct cmng_struct_per_port m_cmng_port;
1815 u32 wsum;
1816 int port = BP_PORT(bp);
1817
1818 /* Init RATE SHAPING and FAIRNESS contexts */
1819 wsum = bnx2x_calc_vn_wsum(bp);
1820 bnx2x_init_port_minmax(bp, (int)wsum,
1821 bp->link_vars.line_speed,
1822 &m_cmng_port);
1823 if (IS_E1HMF(bp))
1824 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1825 bnx2x_init_vn_minmax(bp, 2*vn + port,
1826 wsum, bp->link_vars.line_speed,
1827 &m_cmng_port);
1828 }
c18487ee 1829}
a2fbb9ea 1830
1831static void bnx2x__link_status_update(struct bnx2x *bp)
1832{
1833 if (bp->state != BNX2X_STATE_OPEN)
1834 return;
a2fbb9ea 1835
c18487ee 1836 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1837
1838 /* indicate link status */
1839 bnx2x_link_report(bp);
a2fbb9ea 1840}
a2fbb9ea 1841
1842static void bnx2x_pmf_update(struct bnx2x *bp)
1843{
1844 int port = BP_PORT(bp);
1845 u32 val;
1846
1847 bp->port.pmf = 1;
1848 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1849
1850 /* enable nig attention */
1851 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1852 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1853 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1854}
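/* Editorial example: the edge mask above is 0xff0f plus this VN's NIG
 * attention bit, e.g. for E1HVN 2 it is 0xff0f | (1 << 6) = 0xff4f. */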
1855
c18487ee 1856/* end of Link */
1857
1858/* slow path */
1859
1860/*
1861 * General service functions
1862 */
1863
1864/* the slow path queue is odd since completions arrive on the fastpath ring */
1865static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1866 u32 data_hi, u32 data_lo, int common)
1867{
34f80b04 1868 int func = BP_FUNC(bp);
a2fbb9ea 1869
1870 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1871 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1872 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
1873 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1874 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1875
1876#ifdef BNX2X_STOP_ON_ERROR
1877 if (unlikely(bp->panic))
1878 return -EIO;
1879#endif
1880
34f80b04 1881 spin_lock_bh(&bp->spq_lock);
1882
1883 if (!bp->spq_left) {
1884 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 1885 spin_unlock_bh(&bp->spq_lock);
1886 bnx2x_panic();
1887 return -EBUSY;
1888 }
f1410647 1889
1890 /* CID needs the port number to be encoded in it */
1891 bp->spq_prod_bd->hdr.conn_and_cmd_data =
1892 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
1893 HW_CID(bp, cid)));
1894 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1895 if (common)
1896 bp->spq_prod_bd->hdr.type |=
1897 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1898
1899 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1900 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1901
1902 bp->spq_left--;
1903
1904 if (bp->spq_prod_bd == bp->spq_last_bd) {
1905 bp->spq_prod_bd = bp->spq;
1906 bp->spq_prod_idx = 0;
1907 DP(NETIF_MSG_TIMER, "end of spq\n");
1908
1909 } else {
1910 bp->spq_prod_bd++;
1911 bp->spq_prod_idx++;
1912 }
1913
34f80b04 1914 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1915 bp->spq_prod_idx);
1916
34f80b04 1917 spin_unlock_bh(&bp->spq_lock);
1918 return 0;
1919}
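/*
 * Editorial note: a typical caller, as seen in the statistics code later
 * in this file, posts the query ramrod with no data and common == 0:
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0);
 *
 * The producer wraps from spq_last_bd back to spq, so the ring cannot
 * overrun as long as spq_left is honoured.
 */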
1920
1921/* acquire split MCP access lock register */
1922static int bnx2x_lock_alr(struct bnx2x *bp)
1923{
a2fbb9ea 1924 u32 i, j, val;
34f80b04 1925 int rc = 0;
1926
1927 might_sleep();
1928 i = 100;
1929 for (j = 0; j < i*10; j++) {
1930 val = (1UL << 31);
1931 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1932 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1933 if (val & (1L << 31))
1934 break;
1935
1936 msleep(5);
1937 }
1938 if (!(val & (1L << 31))) {
1939 BNX2X_ERR("Cannot acquire nvram interface\n");
1940 rc = -EBUSY;
1941 }
1942
1943 return rc;
1944}
1945
1946/* Release split MCP access lock register */
1947static void bnx2x_unlock_alr(struct bnx2x *bp)
1948{
1949 u32 val = 0;
1950
1951 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1952}
1953
1954static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1955{
1956 struct host_def_status_block *def_sb = bp->def_status_blk;
1957 u16 rc = 0;
1958
1959 barrier(); /* status block is written to by the chip */
1960
1961 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1962 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1963 rc |= 1;
1964 }
1965 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1966 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1967 rc |= 2;
1968 }
1969 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1970 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1971 rc |= 4;
1972 }
1973 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1974 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1975 rc |= 8;
1976 }
1977 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1978 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1979 rc |= 16;
1980 }
1981 return rc;
1982}
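/* Editorial note: the return value is a change bitmap - bit 0 for the
 * attention index and bits 1-4 for the C/U/X/T storm indices; the slow
 * path task below acts on bit 0 and simply acks the rest. */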
1983
1984/*
1985 * slow path service functions
1986 */
1987
1988static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1989{
1990 int port = BP_PORT(bp);
1991 int func = BP_FUNC(bp);
1992 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
1993 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1994 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1995 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1996 NIG_REG_MASK_INTERRUPT_PORT0;
1997
1998 if (~bp->aeu_mask & (asserted & 0xff))
1999 BNX2X_ERR("IGU ERROR\n");
2000 if (bp->attn_state & asserted)
2001 BNX2X_ERR("IGU ERROR\n");
2002
2003 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2004 bp->aeu_mask, asserted);
2005 bp->aeu_mask &= ~(asserted & 0xff);
2006 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2007
2008 REG_WR(bp, aeu_addr, bp->aeu_mask);
2009
2010 bp->attn_state |= asserted;
2011
2012 if (asserted & ATTN_HARD_WIRED_MASK) {
2013 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2014
2015 /* save nig interrupt mask */
2016 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2017 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2018
c18487ee 2019 bnx2x_link_attn(bp);
2020
2021 /* handle unicore attn? */
2022 }
2023 if (asserted & ATTN_SW_TIMER_4_FUNC)
2024 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2025
2026 if (asserted & GPIO_2_FUNC)
2027 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2028
2029 if (asserted & GPIO_3_FUNC)
2030 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2031
2032 if (asserted & GPIO_4_FUNC)
2033 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2034
2035 if (port == 0) {
2036 if (asserted & ATTN_GENERAL_ATTN_1) {
2037 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2038 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2039 }
2040 if (asserted & ATTN_GENERAL_ATTN_2) {
2041 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2042 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2043 }
2044 if (asserted & ATTN_GENERAL_ATTN_3) {
2045 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2046 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2047 }
2048 } else {
2049 if (asserted & ATTN_GENERAL_ATTN_4) {
2050 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2051 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2052 }
2053 if (asserted & ATTN_GENERAL_ATTN_5) {
2054 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2055 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2056 }
2057 if (asserted & ATTN_GENERAL_ATTN_6) {
2058 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2059 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2060 }
2061 }
2062
2063 } /* if hardwired */
2064
2065 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2066 asserted, BAR_IGU_INTMEM + igu_addr);
2067 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2068
2069 /* now set back the mask */
2070 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2071 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2072}
2073
877e9aa4 2074static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2075{
34f80b04 2076 int port = BP_PORT(bp);
2077 int reg_offset;
2078 u32 val;
2079
2080 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2081 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2082
34f80b04 2083 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2084
2085 val = REG_RD(bp, reg_offset);
2086 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2087 REG_WR(bp, reg_offset, val);
2088
2089 BNX2X_ERR("SPIO5 hw attention\n");
2090
34f80b04 2091 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2092 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2093 /* Fan failure attention */
2094
2095 /* The PHY reset is controlled by GPIO 1 */
2096 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2097 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2098 /* Low power mode is controlled by GPIO 2 */
2099 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2100 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2101 /* mark the failure */
c18487ee 2102 bp->link_params.ext_phy_config &=
877e9aa4 2103 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2104 bp->link_params.ext_phy_config |=
2105 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2106 SHMEM_WR(bp,
2107 dev_info.port_hw_config[port].
2108 external_phy_config,
c18487ee 2109 bp->link_params.ext_phy_config);
2110 /* log the failure */
2111 printk(KERN_ERR PFX "Fan Failure on Network"
2112 " Controller %s has caused the driver to"
2113 " shutdown the card to prevent permanent"
2114 " damage. Please contact Dell Support for"
2115 " assistance\n", bp->dev->name);
2116 break;
2117
2118 default:
2119 break;
2120 }
2121 }
2122
2123 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2124
2125 val = REG_RD(bp, reg_offset);
2126 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2127 REG_WR(bp, reg_offset, val);
2128
2129 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2130 (attn & HW_INTERRUT_ASSERT_SET_0));
2131 bnx2x_panic();
2132 }
2133}
2134
2135static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2136{
2137 u32 val;
2138
2139 if (attn & BNX2X_DOORQ_ASSERT) {
2140
2141 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2142 BNX2X_ERR("DB hw attention 0x%x\n", val);
2143 /* DORQ discard attention */
2144 if (val & 0x2)
2145 BNX2X_ERR("FATAL error from DORQ\n");
2146 }
2147
2148 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2149
2150 int port = BP_PORT(bp);
2151 int reg_offset;
2152
2153 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2154 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2155
2156 val = REG_RD(bp, reg_offset);
2157 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2158 REG_WR(bp, reg_offset, val);
2159
2160 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2161 (attn & HW_INTERRUT_ASSERT_SET_1));
2162 bnx2x_panic();
2163 }
2164}
2165
2166static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2167{
2168 u32 val;
2169
2170 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2171
2172 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2173 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2174 /* CFC error attention */
2175 if (val & 0x2)
2176 BNX2X_ERR("FATAL error from CFC\n");
2177 }
2178
2179 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2180
2181 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2182 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2183 /* RQ_USDMDP_FIFO_OVERFLOW */
2184 if (val & 0x18000)
2185 BNX2X_ERR("FATAL error from PXP\n");
2186 }
2187
2188 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2189
2190 int port = BP_PORT(bp);
2191 int reg_offset;
2192
2193 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2194 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2195
2196 val = REG_RD(bp, reg_offset);
2197 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2198 REG_WR(bp, reg_offset, val);
2199
2200 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2201 (attn & HW_INTERRUT_ASSERT_SET_2));
2202 bnx2x_panic();
2203 }
2204}
2205
2206static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2207{
2208 u32 val;
2209
2210 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2211
2212 if (attn & BNX2X_PMF_LINK_ASSERT) {
2213 int func = BP_FUNC(bp);
2214
2215 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2216 bnx2x__link_status_update(bp);
2217 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2218 DRV_STATUS_PMF)
2219 bnx2x_pmf_update(bp);
2220
2221 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2222
2223 BNX2X_ERR("MC assert!\n");
2224 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2225 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2226 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2227 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2228 bnx2x_panic();
2229
2230 } else if (attn & BNX2X_MCP_ASSERT) {
2231
2232 BNX2X_ERR("MCP assert!\n");
2233 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2234 bnx2x_fw_dump(bp);
2235
2236 } else
2237 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2238 }
2239
2240 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2241 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2242 if (attn & BNX2X_GRC_TIMEOUT) {
2243 val = CHIP_IS_E1H(bp) ?
2244 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2245 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2246 }
2247 if (attn & BNX2X_GRC_RSV) {
2248 val = CHIP_IS_E1H(bp) ?
2249 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2250 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2251 }
877e9aa4 2252 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2253 }
2254}
2255
2256static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2257{
2258 struct attn_route attn;
2259 struct attn_route group_mask;
34f80b04 2260 int port = BP_PORT(bp);
877e9aa4 2261 int index;
2262 u32 reg_addr;
2263 u32 val;
2264
2265 /* need to take HW lock because MCP or other port might also
2266 try to handle this event */
2267 bnx2x_lock_alr(bp);
2268
2269 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2270 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2271 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2272 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2273 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2274 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2275
2276 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2277 if (deasserted & (1 << index)) {
2278 group_mask = bp->attn_group[index];
2279
2280 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2281 index, group_mask.sig[0], group_mask.sig[1],
2282 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2283
2284 bnx2x_attn_int_deasserted3(bp,
2285 attn.sig[3] & group_mask.sig[3]);
2286 bnx2x_attn_int_deasserted1(bp,
2287 attn.sig[1] & group_mask.sig[1]);
2288 bnx2x_attn_int_deasserted2(bp,
2289 attn.sig[2] & group_mask.sig[2]);
2290 bnx2x_attn_int_deasserted0(bp,
2291 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2292
2293 if ((attn.sig[0] & group_mask.sig[0] &
2294 HW_PRTY_ASSERT_SET_0) ||
2295 (attn.sig[1] & group_mask.sig[1] &
2296 HW_PRTY_ASSERT_SET_1) ||
2297 (attn.sig[2] & group_mask.sig[2] &
2298 HW_PRTY_ASSERT_SET_2))
877e9aa4 2299 BNX2X_ERR("FATAL HW block parity attention\n");
2300 }
2301 }
2302
2303 bnx2x_unlock_alr(bp);
2304
34f80b04 2305 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2306
2307 val = ~deasserted;
34f80b04 2308/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2309 val, BAR_IGU_INTMEM + reg_addr); */
2310 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2311
2312 if (bp->aeu_mask & (deasserted & 0xff))
34f80b04 2313 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea 2314 if (~bp->attn_state & deasserted)
34f80b04 2315 BNX2X_ERR("IGU BUG!\n");
2316
2317 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2318 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2319
2320 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2321 bp->aeu_mask |= (deasserted & 0xff);
2322
2323 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2324 REG_WR(bp, reg_addr, bp->aeu_mask);
2325
2326 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2327 bp->attn_state &= ~deasserted;
2328 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2329}
2330
2331static void bnx2x_attn_int(struct bnx2x *bp)
2332{
2333 /* read local copy of bits */
2334 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2335 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2336 u32 attn_state = bp->attn_state;
2337
2338 /* look for changed bits */
2339 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2340 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2341
2342 DP(NETIF_MSG_HW,
2343 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2344 attn_bits, attn_ack, asserted, deasserted);
2345
2346 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2347 BNX2X_ERR("BAD attention state\n");
2348
2349 /* handle bits that were raised */
2350 if (asserted)
2351 bnx2x_attn_int_asserted(bp, asserted);
2352
2353 if (deasserted)
2354 bnx2x_attn_int_deasserted(bp, deasserted);
2355}
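/* Editorial example: with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4 and
 * deasserted = ~0x5 & 0x1 & 0x1 = 0, i.e. bit 2 was newly raised and
 * nothing was cleared. */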
2356
2357static void bnx2x_sp_task(struct work_struct *work)
2358{
2359 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2360 u16 status;
2361
34f80b04 2362
2363 /* Return here if interrupt is disabled */
2364 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2365 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2366 return;
2367 }
2368
2369 status = bnx2x_update_dsb_idx(bp);
2370/* if (status == 0) */
2371/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2372
34f80b04 2373 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2374
2375 /* HW attentions */
2376 if (status & 0x1)
a2fbb9ea 2377 bnx2x_attn_int(bp);
a2fbb9ea 2378
2379 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2380 IGU_INT_NOP, 1);
2381 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2382 IGU_INT_NOP, 1);
2383 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2384 IGU_INT_NOP, 1);
2385 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2386 IGU_INT_NOP, 1);
2387 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2388 IGU_INT_ENABLE, 1);
877e9aa4 2389
2390}
2391
2392static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2393{
2394 struct net_device *dev = dev_instance;
2395 struct bnx2x *bp = netdev_priv(dev);
2396
2397 /* Return here if interrupt is disabled */
2398 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2399 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2400 return IRQ_HANDLED;
2401 }
2402
877e9aa4 2403 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2404
2405#ifdef BNX2X_STOP_ON_ERROR
2406 if (unlikely(bp->panic))
2407 return IRQ_HANDLED;
2408#endif
2409
2410 schedule_work(&bp->sp_task);
2411
2412 return IRQ_HANDLED;
2413}
2414
2415/* end of slow path */
2416
2417/* Statistics */
2418
2419/****************************************************************************
2420* Macros
2421****************************************************************************/
2422
2423#define UPDATE_STAT(s, t) \
2424 do { \
2425 estats->t += new->s - old->s; \
2426 old->s = new->s; \
2427 } while (0)
2428
2429/* sum[hi:lo] += add[hi:lo] */
2430#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2431 do { \
2432 s_lo += a_lo; \
2433 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2434 } while (0)
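/*
 * Editorial sketch (hypothetical self-test, not in the driver): adding 1
 * to the split value 0x0:0xffffffff must carry into the high word.
 */
static inline void example_add_64_carry(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* here s_hi == 1 and s_lo == 0 */
}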
2435
2436/* difference = minuend - subtrahend */
2437#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2438 do { \
2439 if (m_lo < s_lo) { /* underflow */ \
2440 d_hi = m_hi - s_hi; \
2441 if (d_hi > 0) { /* we can 'loan' 1 */ \
2442 d_hi--; \
2443 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2444 } else { /* m_hi <= s_hi */ \
2445 d_hi = 0; \
2446 d_lo = 0; \
2447 } \
2448 } else { /* m_lo >= s_lo */ \
2449 if (m_hi < s_hi) { \
2450 d_hi = 0; \
2451 d_lo = 0; \
2452 } else { /* m_hi >= s_hi */ \
2453 d_hi = m_hi - s_hi; \
2454 d_lo = m_lo - s_lo; \
2455 } \
2456 } \
2457 } while (0)
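/* Editorial example for DIFF_64: 0x2:0x0 minus 0x1:0x1 borrows from the
 * high word - d_hi = 2 - 1 - 1 = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 =
 * 0xffffffff. */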
2458
2459/* minuend -= subtrahend */
2460#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2461 do { \
2462 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
2463 } while (0)
2464
2465#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
2466 do { \
2467 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
2468 diff.lo, new->s_lo, old->s_lo); \
2469 old->s_hi = new->s_hi; \
2470 old->s_lo = new->s_lo; \
2471 ADD_64(estats->t_hi, diff.hi, \
2472 estats->t_lo, diff.lo); \
2473 } while (0)
2474
2475/* sum[hi:lo] += add */
2476#define ADD_EXTEND_64(s_hi, s_lo, a) \
2477 do { \
2478 s_lo += a; \
2479 s_hi += (s_lo < a) ? 1 : 0; \
2480 } while (0)
2481
2482#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
2483 do { \
2484 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
2485 } while (0)
2486
2487#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
2488 do { \
2489 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2490 old_tclient->s = le32_to_cpu(tclient->s); \
2491 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
2492 } while (0)
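/* Editorial note: UPDATE_EXTEND_TSTAT relies on unsigned wraparound -
 * the new-minus-old u32 difference stays correct across a firmware
 * counter overflow, provided the counter wraps at most once between
 * updates. */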
2493
2494/*
2495 * General service functions
2496 */
2497
2498static inline long bnx2x_hilo(u32 *hiref)
2499{
2500 u32 lo = *(hiref + 1);
2501#if (BITS_PER_LONG == 64)
2502 u32 hi = *hiref;
2503
2504 return HILO_U64(hi, lo);
2505#else
2506 return lo;
2507#endif
2508}
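/* Editorial example: on a 64-bit kernel bnx2x_hilo() folds the adjacent
 * pair hi = 0x1, lo = 0x2 into 0x100000002; a 32-bit kernel
 * deliberately returns only the low word. */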
2509
2510/*
2511 * Init service functions
2512 */
2513
2514static void bnx2x_init_mac_stats(struct bnx2x *bp)
2515{
2516 struct dmae_command *dmae;
34f80b04 2517 int port = BP_PORT(bp);
2518 int loader_idx = port * 8;
2519 u32 opcode;
2520 u32 mac_addr;
2521
2522 bp->executer_idx = 0;
34f80b04 2523 if (bp->func_stx) {
2524 /* MCP */
2525 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2526 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2527#ifdef __BIG_ENDIAN
2528 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2529#else
2530 DMAE_CMD_ENDIANITY_DW_SWAP |
2531#endif
2532 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2533
c18487ee 2534 if (bp->link_vars.link_up)
2535 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
2536
2537 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2538 dmae->opcode = opcode;
2539 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
2540 sizeof(u32));
2541 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
2542 sizeof(u32));
34f80b04 2543 dmae->dst_addr_lo = bp->func_stx >> 2;
2544 dmae->dst_addr_hi = 0;
2545 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
2546 sizeof(u32)) >> 2;
c18487ee 2547 if (bp->link_vars.link_up) {
2548 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2549 dmae->comp_addr_hi = 0;
2550 dmae->comp_val = 1;
2551 } else {
2552 dmae->comp_addr_lo = 0;
2553 dmae->comp_addr_hi = 0;
2554 dmae->comp_val = 0;
2555 }
2556 }
2557
c18487ee 2558 if (!bp->link_vars.link_up) {
2559 /* no need to collect statistics in link down */
2560 return;
2561 }
2562
2563 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2564 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2565 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2566#ifdef __BIG_ENDIAN
2567 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2568#else
2569 DMAE_CMD_ENDIANITY_DW_SWAP |
2570#endif
2571 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2572
c18487ee 2573 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2574
2575 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
2576 NIG_REG_INGRESS_BMAC0_MEM);
2577
2578 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
2579 BIGMAC_REGISTER_TX_STAT_GTBYT */
2580 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2581 dmae->opcode = opcode;
2582 dmae->src_addr_lo = (mac_addr +
2583 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2584 dmae->src_addr_hi = 0;
2585 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2586 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2587 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
2588 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2589 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2590 dmae->comp_addr_hi = 0;
2591 dmae->comp_val = 1;
2592
2593 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
2594 BIGMAC_REGISTER_RX_STAT_GRIPJ */
2595 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2596 dmae->opcode = opcode;
2597 dmae->src_addr_lo = (mac_addr +
2598 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2599 dmae->src_addr_hi = 0;
2600 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2601 offsetof(struct bmac_stats, rx_gr64));
2602 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2603 offsetof(struct bmac_stats, rx_gr64));
2604 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
2605 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2607 dmae->comp_addr_hi = 0;
2608 dmae->comp_val = 1;
2609
c18487ee 2610 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
2611
2612 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
2613
2614 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
2615 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2616 dmae->opcode = opcode;
2617 dmae->src_addr_lo = (mac_addr +
2618 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
2619 dmae->src_addr_hi = 0;
2620 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2621 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2622 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
2623 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2624 dmae->comp_addr_hi = 0;
2625 dmae->comp_val = 1;
2626
2627 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
2628 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2629 dmae->opcode = opcode;
2630 dmae->src_addr_lo = (mac_addr +
2631 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
2632 dmae->src_addr_hi = 0;
2633 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2634 offsetof(struct emac_stats,
2635 rx_falsecarriererrors));
2636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2637 offsetof(struct emac_stats,
2638 rx_falsecarriererrors));
2639 dmae->len = 1;
2640 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2641 dmae->comp_addr_hi = 0;
2642 dmae->comp_val = 1;
2643
2644 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
2645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2646 dmae->opcode = opcode;
2647 dmae->src_addr_lo = (mac_addr +
2648 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
2649 dmae->src_addr_hi = 0;
2650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2651 offsetof(struct emac_stats,
2652 tx_ifhcoutoctets));
2653 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2654 offsetof(struct emac_stats,
2655 tx_ifhcoutoctets));
2656 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
2657 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2658 dmae->comp_addr_hi = 0;
2659 dmae->comp_val = 1;
2660 }
2661
2662 /* NIG */
2663 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2664 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2665 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
2666 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2667#ifdef __BIG_ENDIAN
2668 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2669#else
2670 DMAE_CMD_ENDIANITY_DW_SWAP |
2671#endif
2672 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2673 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
2674 NIG_REG_STAT0_BRB_DISCARD) >> 2;
2675 dmae->src_addr_hi = 0;
2676 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
2677 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
2678 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
2679 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
2680 offsetof(struct nig_stats, done));
2681 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
2682 offsetof(struct nig_stats, done));
2683 dmae->comp_val = 0xffffffff;
2684}
2685
2686static void bnx2x_init_stats(struct bnx2x *bp)
2687{
34f80b04 2688 int port = BP_PORT(bp);
2689
2690 bp->stats_state = STATS_STATE_DISABLE;
2691 bp->executer_idx = 0;
2692
2693 bp->old_brb_discard = REG_RD(bp,
2694 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
2695
2696 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2697 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
2698 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
2699
2700 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
2701 REG_WR(bp, BAR_XSTRORM_INTMEM +
2702 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2703
2704 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
2705 REG_WR(bp, BAR_TSTRORM_INTMEM +
2706 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2707
2708 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
2709 REG_WR(bp, BAR_CSTRORM_INTMEM +
2710 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2711
2712 REG_WR(bp, BAR_XSTRORM_INTMEM +
2713 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2714 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2715 REG_WR(bp, BAR_XSTRORM_INTMEM +
2716 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2717 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2718
2719 REG_WR(bp, BAR_TSTRORM_INTMEM +
2720 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2721 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2722 REG_WR(bp, BAR_TSTRORM_INTMEM +
2723 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2724 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2725}
2726
2727static void bnx2x_stop_stats(struct bnx2x *bp)
2728{
2729 might_sleep();
2730 if (bp->stats_state != STATS_STATE_DISABLE) {
2731 int timeout = 10;
2732
2733 bp->stats_state = STATS_STATE_STOP;
2734 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2735
2736 while (bp->stats_state != STATS_STATE_DISABLE) {
2737 if (!timeout) {
c14423fe 2738 BNX2X_ERR("timeout waiting for stats stop\n");
2739 break;
2740 }
2741 timeout--;
2742 msleep(100);
2743 }
2744 }
2745 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
2746}
2747
2748/*
2749 * Statistics service functions
2750 */
2751
2752static void bnx2x_update_bmac_stats(struct bnx2x *bp)
2753{
2754 struct regp diff;
2755 struct regp sum;
2756 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
2757 struct bmac_stats *old = &bp->old_bmac;
2758 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2759
2760 sum.hi = 0;
2761 sum.lo = 0;
2762
2763 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
2764 tx_gtbyt.lo, total_bytes_transmitted_lo);
2765
2766 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
2767 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
2768 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2769
2770 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
2771 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
2772 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2773
2774 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
2775 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
2776 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
2777 estats->total_unicast_packets_transmitted_lo, sum.lo);
2778
2779 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
2780 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
2781 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
2782 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
2783 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
2784 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
2785 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
2786 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
2787 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
2788 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
2789 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
2790
2791 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
2792 UPDATE_STAT(rx_grund.lo, runt_packets_received);
2793 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
2794 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
2795 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
2796 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
2797 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
2798 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
2799
2800 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
2801 rx_grerb.lo, stat_IfHCInBadOctets_lo);
2802 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
2803 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
2804 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
2805 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
2806 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
2807}
2808
2809static void bnx2x_update_emac_stats(struct bnx2x *bp)
2810{
2811 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
2812 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2813
2814 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
2815 total_bytes_transmitted_lo);
2816 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
2817 total_unicast_packets_transmitted_hi,
2818 total_unicast_packets_transmitted_lo);
2819 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
2820 total_multicast_packets_transmitted_hi,
2821 total_multicast_packets_transmitted_lo);
2822 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
2823 total_broadcast_packets_transmitted_hi,
2824 total_broadcast_packets_transmitted_lo);
2825
2826 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
2827 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
2828 estats->single_collision_transmit_frames +=
2829 new->tx_dot3statssinglecollisionframes;
2830 estats->multiple_collision_transmit_frames +=
2831 new->tx_dot3statsmultiplecollisionframes;
2832 estats->late_collision_frames += new->tx_dot3statslatecollisions;
2833 estats->excessive_collision_frames +=
2834 new->tx_dot3statsexcessivecollisions;
2835 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
2836 estats->frames_transmitted_65_127_bytes +=
2837 new->tx_etherstatspkts65octetsto127octets;
2838 estats->frames_transmitted_128_255_bytes +=
2839 new->tx_etherstatspkts128octetsto255octets;
2840 estats->frames_transmitted_256_511_bytes +=
2841 new->tx_etherstatspkts256octetsto511octets;
2842 estats->frames_transmitted_512_1023_bytes +=
2843 new->tx_etherstatspkts512octetsto1023octets;
2844 estats->frames_transmitted_1024_1522_bytes +=
2845 new->tx_etherstatspkts1024octetsto1522octet;
2846 estats->frames_transmitted_1523_9022_bytes +=
2847 new->tx_etherstatspktsover1522octets;
2848
2849 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
2850 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
2851 estats->false_carrier_detections += new->rx_falsecarriererrors;
2852 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
2853 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
2854 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
2855 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
2856 estats->control_frames_received += new->rx_maccontrolframesreceived;
2857 estats->error_runt_packets_received += new->rx_etherstatsfragments;
2858 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
2859
2860 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
2861 stat_IfHCInBadOctets_lo);
2862 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
2863 stat_IfHCOutBadOctets_lo);
2864 estats->stat_Dot3statsInternalMacTransmitErrors +=
2865 new->tx_dot3statsinternalmactransmiterrors;
2866 estats->stat_Dot3StatsCarrierSenseErrors +=
2867 new->rx_dot3statscarriersenseerrors;
2868 estats->stat_Dot3StatsDeferredTransmissions +=
2869 new->tx_dot3statsdeferredtransmissions;
2870 estats->stat_FlowControlDone += new->tx_flowcontroldone;
2871 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
2872}
2873
2874static int bnx2x_update_storm_stats(struct bnx2x *bp)
2875{
2876 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
2877 struct tstorm_common_stats *tstats = &stats->tstorm_common;
2878 struct tstorm_per_client_stats *tclient =
2879 &tstats->client_statistics[0];
2880 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
2881 struct xstorm_common_stats *xstats = &stats->xstorm_common;
2882 struct nig_stats *nstats = bnx2x_sp(bp, nig);
2883 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2884 u32 diff;
2885
2886 /* are DMAE stats valid? */
2887 if (nstats->done != 0xffffffff) {
2888 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
2889 return -1;
2890 }
2891
2892 /* are storm stats valid? */
2893 if (tstats->done.hi != 0xffffffff) {
2894 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
2895 return -2;
2896 }
2897 if (xstats->done.hi != 0xffffffff) {
2898 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
2899 return -3;
2900 }
2901
2902 estats->total_bytes_received_hi =
2903 estats->valid_bytes_received_hi =
2904 le32_to_cpu(tclient->total_rcv_bytes.hi);
2905 estats->total_bytes_received_lo =
2906 estats->valid_bytes_received_lo =
2907 le32_to_cpu(tclient->total_rcv_bytes.lo);
2908 ADD_64(estats->total_bytes_received_hi,
2909 le32_to_cpu(tclient->rcv_error_bytes.hi),
2910 estats->total_bytes_received_lo,
2911 le32_to_cpu(tclient->rcv_error_bytes.lo));
2912
2913 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
2914 total_unicast_packets_received_hi,
2915 total_unicast_packets_received_lo);
2916 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
2917 total_multicast_packets_received_hi,
2918 total_multicast_packets_received_lo);
2919 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
2920 total_broadcast_packets_received_hi,
2921 total_broadcast_packets_received_lo);
2922
2923 estats->frames_received_64_bytes = MAC_STX_NA;
2924 estats->frames_received_65_127_bytes = MAC_STX_NA;
2925 estats->frames_received_128_255_bytes = MAC_STX_NA;
2926 estats->frames_received_256_511_bytes = MAC_STX_NA;
2927 estats->frames_received_512_1023_bytes = MAC_STX_NA;
2928 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
2929 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
2930
2931 estats->x_total_sent_bytes_hi =
2932 le32_to_cpu(xstats->total_sent_bytes.hi);
2933 estats->x_total_sent_bytes_lo =
2934 le32_to_cpu(xstats->total_sent_bytes.lo);
2935 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
2936
2937 estats->t_rcv_unicast_bytes_hi =
2938 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
2939 estats->t_rcv_unicast_bytes_lo =
2940 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
2941 estats->t_rcv_broadcast_bytes_hi =
2942 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
2943 estats->t_rcv_broadcast_bytes_lo =
2944 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
2945 estats->t_rcv_multicast_bytes_hi =
2946 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
2947 estats->t_rcv_multicast_bytes_lo =
2948 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
2949 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
2950
2951 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
2952 estats->packets_too_big_discard =
2953 le32_to_cpu(tclient->packets_too_big_discard);
2954 estats->jabber_packets_received = estats->packets_too_big_discard +
2955 estats->stat_Dot3statsFramesTooLong;
2956 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
2957 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
2958 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
2959 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
2960 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
2961 estats->brb_truncate_discard =
2962 le32_to_cpu(tstats->brb_truncate_discard);
2963
2964 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
2965 bp->old_brb_discard = nstats->brb_discard;
2966
2967 estats->brb_packet = nstats->brb_packet;
2968 estats->brb_truncate = nstats->brb_truncate;
2969 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
2970 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
2971 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
2972 estats->mng_discard = nstats->mng_discard;
2973 estats->mng_octet_inp = nstats->mng_octet_inp;
2974 estats->mng_octet_out = nstats->mng_octet_out;
2975 estats->mng_packet_inp = nstats->mng_packet_inp;
2976 estats->mng_packet_out = nstats->mng_packet_out;
2977 estats->pbf_octets = nstats->pbf_octets;
2978 estats->pbf_packet = nstats->pbf_packet;
2979 estats->safc_inp = nstats->safc_inp;
2980
2981 xstats->done.hi = 0;
2982 tstats->done.hi = 0;
2983 nstats->done = 0;
2984
2985 return 0;
2986}
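/* Editorial note: clearing the done flags above re-arms the handshake -
 * the next update is accepted only after the DMAE engine and the storm
 * firmware have rewritten them to 0xffffffff. */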
2987
2988static void bnx2x_update_net_stats(struct bnx2x *bp)
2989{
2990 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2991 struct net_device_stats *nstats = &bp->dev->stats;
2992
2993 nstats->rx_packets =
2994 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
2995 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
2996 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
2997
2998 nstats->tx_packets =
2999 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3000 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3001 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3002
3003 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3004
0e39e645 3005 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3006
0e39e645 3007 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
3008 nstats->tx_dropped = 0;
3009
3010 nstats->multicast =
3011 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3012
3013 nstats->collisions = estats->single_collision_transmit_frames +
3014 estats->multiple_collision_transmit_frames +
3015 estats->late_collision_frames +
3016 estats->excessive_collision_frames;
3017
3018 nstats->rx_length_errors = estats->runt_packets_received +
3019 estats->jabber_packets_received;
3020 nstats->rx_over_errors = estats->brb_discard +
3021 estats->brb_truncate_discard;
3022 nstats->rx_crc_errors = estats->crc_receive_errors;
3023 nstats->rx_frame_errors = estats->alignment_errors;
0e39e645 3024 nstats->rx_fifo_errors = estats->no_buff_discard;
3025 nstats->rx_missed_errors = estats->xxoverflow_discard;
3026
3027 nstats->rx_errors = nstats->rx_length_errors +
3028 nstats->rx_over_errors +
3029 nstats->rx_crc_errors +
3030 nstats->rx_frame_errors +
3031 nstats->rx_fifo_errors +
3032 nstats->rx_missed_errors;
3033
3034 nstats->tx_aborted_errors = estats->late_collision_frames +
0e39e645 3035 estats->excessive_collision_frames;
3036 nstats->tx_carrier_errors = estats->false_carrier_detections;
3037 nstats->tx_fifo_errors = 0;
3038 nstats->tx_heartbeat_errors = 0;
3039 nstats->tx_window_errors = 0;
3040
3041 nstats->tx_errors = nstats->tx_aborted_errors +
3042 nstats->tx_carrier_errors;
3043
3044 estats->mac_stx_start = ++estats->mac_stx_end;
3045}
3046
3047static void bnx2x_update_stats(struct bnx2x *bp)
3048{
3049 if (!bnx2x_update_storm_stats(bp)) {
3050
c18487ee 3051 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3052 bnx2x_update_bmac_stats(bp);
3053
c18487ee 3054 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3055 bnx2x_update_emac_stats(bp);
3056
3057 } else { /* unreached */
3058 BNX2X_ERR("no MAC active\n");
3059 return;
3060 }
3061
3062 bnx2x_update_net_stats(bp);
3063 }
3064
3065 if (bp->msglevel & NETIF_MSG_TIMER) {
3066 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3067 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3068 int i;
3069
3070 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3071 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3072 " tx pkt (%lx)\n",
3073 bnx2x_tx_avail(bp->fp),
3074 *bp->fp->tx_cons_sb, nstats->tx_packets);
3075 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3076 " rx pkt (%lx)\n",
3077 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
3078 *bp->fp->rx_cons_sb, nstats->rx_packets);
3079 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3080 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3081 estats->driver_xoff, estats->brb_discard);
3082 printk(KERN_DEBUG "tstats: checksum_discard %u "
3083 "packets_too_big_discard %u no_buff_discard %u "
3084 "mac_discard %u mac_filter_discard %u "
3085 "xxovrflow_discard %u brb_truncate_discard %u "
3086 "ttl0_discard %u\n",
3087 estats->checksum_discard,
3088 estats->packets_too_big_discard,
3089 estats->no_buff_discard, estats->mac_discard,
3090 estats->mac_filter_discard, estats->xxoverflow_discard,
3091 estats->brb_truncate_discard, estats->ttl0_discard);
3092
3093 for_each_queue(bp, i) {
3094 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3095 bnx2x_fp(bp, i, tx_pkt),
3096 bnx2x_fp(bp, i, rx_pkt),
3097 bnx2x_fp(bp, i, rx_calls));
3098 }
3099 }
3100
3101 if (bp->state != BNX2X_STATE_OPEN) {
3102 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
3103 return;
3104 }
3105
3106#ifdef BNX2X_STOP_ON_ERROR
3107 if (unlikely(bp->panic))
3108 return;
3109#endif
3110
3111 /* loader */
3112 if (bp->executer_idx) {
3113 struct dmae_command *dmae = &bp->dmae;
34f80b04 3114 int port = BP_PORT(bp);
3115 int loader_idx = port * 8;
3116
3117 memset(dmae, 0, sizeof(struct dmae_command));
3118
3119 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3120 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3121 DMAE_CMD_DST_RESET |
3122#ifdef __BIG_ENDIAN
3123 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3124#else
3125 DMAE_CMD_ENDIANITY_DW_SWAP |
3126#endif
3127 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3128 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3129 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3130 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3131 sizeof(struct dmae_command) *
3132 (loader_idx + 1)) >> 2;
3133 dmae->dst_addr_hi = 0;
3134 dmae->len = sizeof(struct dmae_command) >> 2;
3135 dmae->len--; /* !!! for A0/1 only */
3136 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3137 dmae->comp_addr_hi = 0;
3138 dmae->comp_val = 1;
3139
3140 bnx2x_post_dmae(bp, dmae, loader_idx);
3141 }
3142
3143 if (bp->stats_state != STATS_STATE_ENABLE) {
3144 bp->stats_state = STATS_STATE_DISABLE;
3145 return;
3146 }
3147
3148 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
3149 /* stats ramrod has its own slot on the spe */
3150 bp->spq_left++;
3151 bp->stat_pending = 1;
3152 }
3153}
3154
3155static void bnx2x_timer(unsigned long data)
3156{
3157 struct bnx2x *bp = (struct bnx2x *) data;
3158
3159 if (!netif_running(bp->dev))
3160 return;
3161
3162 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3163 goto timer_restart;
3164
3165 if (poll) {
3166 struct bnx2x_fastpath *fp = &bp->fp[0];
3167 int rc;
3168
3169 bnx2x_tx_int(fp, 1000);
3170 rc = bnx2x_rx_int(fp, 1000);
3171 }
3172
3173 if (!BP_NOMCP(bp)) {
3174 int func = BP_FUNC(bp);
3175 u32 drv_pulse;
3176 u32 mcp_pulse;
3177
3178 ++bp->fw_drv_pulse_wr_seq;
3179 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3180 /* TBD - add SYSTEM_TIME */
3181 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3182 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3183
34f80b04 3184 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3185 MCP_PULSE_SEQ_MASK);
3186 /* The delta between driver pulse and mcp response
3187 * should be 1 (before mcp response) or 0 (after mcp response)
3188 */
3189 if ((drv_pulse != mcp_pulse) &&
3190 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3191 /* someone lost a heartbeat... */
3192 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3193 drv_pulse, mcp_pulse);
3194 }
3195 }
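/* Editorial example: if the driver just wrote pulse 0x34 and the MCP's
 * last echo is 0x33, the delta is 1 and the heartbeat is healthy; an
 * echo of 0x30 would trigger the error above. */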
3196
3197 if (bp->stats_state == STATS_STATE_DISABLE)
f1410647 3198 goto timer_restart;
3199
3200 bnx2x_update_stats(bp);
3201
f1410647 3202timer_restart:
3203 mod_timer(&bp->timer, jiffies + bp->current_interval);
3204}
3205
3206/* end of Statistics */
3207
3208/* nic init */
3209
3210/*
3211 * nic init service functions
3212 */
3213
34f80b04 3214static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3215{
3216 int port = BP_PORT(bp);
3217
3218 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3219 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3220 sizeof(struct ustorm_def_status_block)/4);
3221 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3222 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3223 sizeof(struct cstorm_def_status_block)/4);
3224}
3225
3226static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
3227 struct host_status_block *sb, dma_addr_t mapping)
3228{
3229 int port = BP_PORT(bp);
a2fbb9ea 3230 int index;
34f80b04 3231 u64 section;
3232
3233 /* USTORM */
3234 section = ((u64)mapping) + offsetof(struct host_status_block,
3235 u_status_block);
34f80b04 3236 sb->u_status_block.status_block_id = sb_id;
3237
3238 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3239 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 3240 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3241 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
3242 U64_HI(section));
3243
3244 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
3245 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 3246 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
3247
3248 /* CSTORM */
3249 section = ((u64)mapping) + offsetof(struct host_status_block,
3250 c_status_block);
34f80b04 3251 sb->c_status_block.status_block_id = sb_id;
3252
3253 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3254 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 3255 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3256 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
3257 U64_HI(section));
3258
3259 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
3260 REG_WR16(bp, BAR_CSTRORM_INTMEM +
3261 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
3262
3263 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
3264}
3265
3266static void bnx2x_zero_def_sb(struct bnx2x *bp)
3267{
3268 int func = BP_FUNC(bp);
a2fbb9ea 3269
3270 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3271 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3272 sizeof(struct ustorm_def_status_block)/4);
3273 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3274 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3275 sizeof(struct cstorm_def_status_block)/4);
3276 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
3277 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3278 sizeof(struct xstorm_def_status_block)/4);
3279 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
3280 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3281 sizeof(struct tstorm_def_status_block)/4);
3282}
3283
3284static void bnx2x_init_def_sb(struct bnx2x *bp,
3285 struct host_def_status_block *def_sb,
34f80b04 3286 dma_addr_t mapping, int sb_id)
a2fbb9ea 3287{
3288 int port = BP_PORT(bp);
3289 int func = BP_FUNC(bp);
3290 int index, val, reg_offset;
3291 u64 section;
3292
3293 /* ATTN */
3294 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3295 atten_status_block);
34f80b04 3296 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 3297
3298 bp->def_att_idx = 0;
3299 bp->attn_state = 0;
3300
3301 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3302 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3303
34f80b04 3304 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3305 bp->attn_group[index].sig[0] = REG_RD(bp,
3306 reg_offset + 0x10*index);
3307 bp->attn_group[index].sig[1] = REG_RD(bp,
3308 reg_offset + 0x4 + 0x10*index);
3309 bp->attn_group[index].sig[2] = REG_RD(bp,
3310 reg_offset + 0x8 + 0x10*index);
3311 bp->attn_group[index].sig[3] = REG_RD(bp,
3312 reg_offset + 0xc + 0x10*index);
3313 }
3314
3315 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3316 MISC_REG_AEU_MASK_ATTN_FUNC_0));
3317
3318 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
3319 HC_REG_ATTN_MSG0_ADDR_L);
3320
3321 REG_WR(bp, reg_offset, U64_LO(section));
3322 REG_WR(bp, reg_offset + 4, U64_HI(section));
3323
3324 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
3325
3326 val = REG_RD(bp, reg_offset);
34f80b04 3327 val |= sb_id;
3328 REG_WR(bp, reg_offset, val);
3329
3330 /* USTORM */
3331 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3332 u_def_status_block);
34f80b04 3333 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 3334
3335 bp->def_u_idx = 0;
3336
a2fbb9ea 3337 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3338 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3339 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3340 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3341 U64_HI(section));
3342 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
3343 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3344 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
3345 BNX2X_BTR);
3346
3347 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
3348 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 3349 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
3350
3351 /* CSTORM */
3352 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3353 c_def_status_block);
34f80b04 3354 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea 3355
3356 bp->def_c_idx = 0;
3357
a2fbb9ea 3358 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3359 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3360 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3361 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3362 U64_HI(section));
3363 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
3364 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3365 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
3366 BNX2X_BTR);
3367
3368 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
3369 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 3370 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
3371
3372 /* TSTORM */
3373 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3374 t_def_status_block);
34f80b04 3375 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea 3376
3377 bp->def_t_idx = 0;
3378
a2fbb9ea 3379 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3380 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3381 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3382 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3383 U64_HI(section));
3384 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
3385 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3386 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
3387 BNX2X_BTR);
3388
3389 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
3390 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 3391 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
3392
3393 /* XSTORM */
3394 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3395 x_def_status_block);
34f80b04 3396 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea 3397
3398 bp->def_x_idx = 0;
3399
a2fbb9ea 3400 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 3401 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3402 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 3403 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3404 U64_HI(section));
3405 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
3406 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3407 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
3408 BNX2X_BTR);
3409
3410 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
3411 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 3412 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 3413
34f80b04 3414 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
3415}
3416
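/*
 * Program the per-SB host coalescing parameters.  rx_ticks/tx_ticks are
 * kept in microseconds; the HC timeout registers appear to be programmed
 * in 12-usec units, hence the division by 12 below (e.g. rx_ticks = 48
 * becomes a timeout value of 4).  A tick value of 0 disables coalescing
 * for that index via the corresponding HC_DISABLE register.
 */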
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						  HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						  HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						  HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						  HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}

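/*
 * The RX BD and RCQ rings are each built from NUM_RX_RINGS/NUM_RCQ_RINGS
 * hardware pages chained into one logical ring: the tail of every page
 * appears to be reserved for a "next page" pointer (the last two BD slots
 * of an RX page, the last entry of an RCQ page), which is why the loops
 * below write the mapping of page (i % NUM_*_RINGS) into the end of page i.
 */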
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	u16 ring_prod;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;

	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* rx completion queue */
		fp->rx_comp_cons = ring_prod = 0;

		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			BUG_TRAP(ring_prod > i);
		}

		fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning! this will generate an interrupt (to the TSTORM) */
		/* must only be done when chip is initialized */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)),
		       ring_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

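/*
 * The slow path queue (SPQ) is a single page of ramrod descriptors.
 * The producer index is kept by the driver and mirrored into XSTORM
 * fast memory below, presumably so the firmware can see where the
 * valid entries end; spq_left tracks the remaining credits out of
 * MAX_SPQ_PENDING.
 */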
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

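/*
 * Fill the RSS indirection table round-robin over the active queues:
 * entry i maps to queue (i % num_queues), so with e.g. 4 queues the
 * table reads 0,1,2,3,0,1,...  Only meaningful in multi-queue mode.
 */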
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = 0;
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

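/*
 * Translate the netdev rx mode into per-function TSTORM MAC filter
 * masks.  'mask' selects this function's bit in each filter word;
 * drop_all/accept_all are set per traffic class (unicast, multicast,
 * broadcast), so e.g. ALLMULTI accepts all multicast and broadcast,
 * presumably leaving unicast to the CAM match.
 */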
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

/*	DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
	   (*(u32 *)&tstorm_config)); */

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
	       ((u32 *)&stats_flags)[1]);

/*	DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
	   ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_E1HOV_OFFSET(func), bp->e1hov);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + 4*i, 0);
}

static void bnx2x_nic_init(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp);
	bnx2x_init_stats(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

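/*
 * The firmware blobs are stored gzip-compressed (RFC 1952): bytes 0-1
 * are the magic 0x1f 0x8b, byte 2 the compression method and byte 3
 * the flags.  Since we inflate with a negative window size (raw
 * deflate), the 10-byte fixed header - plus the NUL-terminated original
 * file name when the FNAME flag (bit 3) is set - must be skipped by hand.
 */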
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

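/*
 * The memory test below repeatedly polls a register until it reaches
 * an expected value.  Each loop has roughly the shape
 *
 *	count = 1000 * factor;
 *	while (count-- && REG_RD(bp, reg) != expected)
 *		msleep(10);
 *
 * where 'factor' stretches the timeout on FPGA/emulation platforms
 * (the real loops read via DMAE where the register is not directly
 * readable).
 */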
/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}


static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix PXP client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the physical address is shifted right 12 bits and a 1 = valid bit is
 * added as bit 53; then, since this is a wide register(TM), we split it
 * into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

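/*
 * Worked example (the address below is purely illustrative): for a
 * DMA address of 0xabcd12345000, ONCHIP_ADDR1() gives the low 32 bits
 * of (addr >> 12), i.e. 0xbcd12345, while ONCHIP_ADDR2() gives the
 * valid bit (1 << 20) OR'ed with (addr >> 44), i.e. 0x0010000a.
 */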
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

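/*
 * The MCP load response selects how much of the chip this function must
 * initialize: COMMON (first driver up) initializes common, port and
 * function blocks; PORT initializes port and function; FUNCTION only
 * its own function.  The switch below relies on case fall-through to
 * implement that cascade.
 */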
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

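/*
 * Driver/MCP mailbox protocol: each request carries an incrementing
 * sequence number in the low bits of drv_mb_header; the MCP echoes it
 * in fw_mb_header together with the response code.  A reply is only
 * accepted if the echoed sequence (FW_MSG_SEQ_NUMBER_MASK) matches the
 * one we just wrote.
 */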
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	/* let the FW do its magic ... */
	msleep(100); /* TBD */

	if (CHIP_REV_IS_SLOW(bp))
		msleep(900);

	rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);
	}

	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

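/*
 * Note the layout of the per-queue status block allocation below: the
 * eth_tx_db_data area is carved out of the same DMA buffer, directly
 * behind the host_status_block, which is why hw_tx_prods points one
 * status block past the start and tx_prods_mapping is offset by
 * sizeof(struct host_status_block).
 */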
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

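/*
 * MSI-X vector layout: entry 0 is the slow path interrupt, entries
 * 1..num_queues serve the fast path queues.  The IGU vector for queue
 * i is offset + i + BP_L_ID(bp), presumably so that different
 * functions use disjoint IGU ranges.
 */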
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n",
				  i + offset, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	config->config_table[0].target_table_entry.flags = 0;
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

5104static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
5105{
5106 struct mac_configuration_cmd_e1h *config =
5107 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
5108
5109 if (bp->state != BNX2X_STATE_OPEN) {
5110 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
5111 return;
5112 }
5113
5114 /* CAM allocation for E1H
5115 * unicasts: by func number
5116 * multicast: 20+FUNC*20, 20 each
5117 */
5118 config->hdr.length_6b = 1;
5119 config->hdr.offset = BP_FUNC(bp);
5120 config->hdr.client_id = BP_CL_ID(bp);
5121 config->hdr.reserved1 = 0;
5122
5123 /* primary MAC */
5124 config->config_table[0].msb_mac_addr =
5125 swab16(*(u16 *)&bp->dev->dev_addr[0]);
5126 config->config_table[0].middle_mac_addr =
5127 swab16(*(u16 *)&bp->dev->dev_addr[2]);
5128 config->config_table[0].lsb_mac_addr =
5129 swab16(*(u16 *)&bp->dev->dev_addr[4]);
5130 config->config_table[0].client_id = BP_L_ID(bp);
5131 config->config_table[0].vlan_id = 0;
5132 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
5133 config->config_table[0].flags = BP_PORT(bp);
5134
5135 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
5136 config->config_table[0].msb_mac_addr,
5137 config->config_table[0].middle_mac_addr,
5138 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
5139
5140 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5141 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
5142 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
5143}
5144
a2fbb9ea
ET
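/* Poll *state_p until it reaches 'state' or roughly 500 msec elapse
 * (a sketch of the contract, derived from the loop below). With
 * poll != 0 the Rx rings are serviced in-line, for callers that run
 * without interrupts; returns 0 on success, -EBUSY on timeout.
 */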
5145static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5146 int *state_p, int poll)
5147{
5148 /* can take a while if any port is running */
34f80b04 5149 int cnt = 500;
a2fbb9ea 5150
c14423fe
ET
5151 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
5152 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
5153
5154 might_sleep();
34f80b04 5155 while (cnt--) {
a2fbb9ea
ET
5156 if (poll) {
5157 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
5158 /* if index is different from 0
5159 * the reply for some commands will
a2fbb9ea
ET
5160 * be on the non-default queue
5161 */
5162 if (idx)
5163 bnx2x_rx_int(&bp->fp[idx], 10);
5164 }
34f80b04 5165 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 5166
49d66772 5167 if (*state_p == state)
a2fbb9ea
ET
5168 return 0;
5169
a2fbb9ea 5170 msleep(1);
a2fbb9ea
ET
5171 }
5172
a2fbb9ea 5173 /* timeout! */
49d66772
ET
5174 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
5175 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
5176#ifdef BNX2X_STOP_ON_ERROR
5177 bnx2x_panic();
5178#endif
a2fbb9ea 5179
49d66772 5180 return -EBUSY;
a2fbb9ea
ET
5181}
5182
5183static int bnx2x_setup_leading(struct bnx2x *bp)
5184{
34f80b04 5185 int rc;
a2fbb9ea 5186
c14423fe 5187 /* reset IGU state */
34f80b04 5188 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
5189
5190 /* SETUP ramrod */
5191 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
5192
34f80b04
EG
5193 /* Wait for completion */
5194 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 5195
34f80b04 5196 return rc;
a2fbb9ea
ET
5197}
5198
5199static int bnx2x_setup_multi(struct bnx2x *bp, int index)
5200{
a2fbb9ea 5201 /* reset IGU state */
34f80b04 5202 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 5203
228241eb 5204 /* SETUP ramrod */
a2fbb9ea
ET
5205 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
5206 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
5207
5208 /* Wait for completion */
5209 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 5210 &(bp->fp[index].state), 0);
a2fbb9ea
ET
5211}
5212
a2fbb9ea
ET
5213static int bnx2x_poll(struct napi_struct *napi, int budget);
5214static void bnx2x_set_rx_mode(struct net_device *dev);
5215
34f80b04
EG
5216/* must be called with rtnl_lock */
5217static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 5218{
228241eb 5219 u32 load_code;
34f80b04
EG
5220 int i, rc;
5221
5222#ifdef BNX2X_STOP_ON_ERROR
5223 if (unlikely(bp->panic))
5224 return -EPERM;
5225#endif
a2fbb9ea
ET
5226
5227 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
5228
34f80b04
EG
5229 /* Send LOAD_REQUEST command to the MCP.
5230 The MCP replies with the type of LOAD command:
5231 if this is the first port to be initialized,
5232 the common blocks should also be initialized; otherwise not.
a2fbb9ea 5233 */
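	/* In terms of init scope (summary of the handling below):
	 * LOAD_COMMON   - first driver on the chip: init common, port
	 *                 and function blocks
	 * LOAD_PORT     - first driver on this port: init port and
	 *                 function blocks
	 * LOAD_FUNCTION - another driver already runs on this port:
	 *                 init only the function blocks
	 */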
34f80b04 5234 if (!BP_NOMCP(bp)) {
228241eb
ET
5235 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
5236 if (!load_code) {
5237 BNX2X_ERR("MCP response failure, unloading\n");
5238 return -EBUSY;
5239 }
34f80b04 5240 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 5241 return -EBUSY; /* other port in diagnostic mode */
34f80b04 5242
a2fbb9ea 5243 } else {
34f80b04
EG
5244 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
5245 load_count[0], load_count[1], load_count[2]);
5246 load_count[0]++;
5247 load_count[1 + BP_PORT(bp)]++;
5248 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
5249 load_count[0], load_count[1], load_count[2]);
5250 if (load_count[0] == 1)
5251 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
5252 else if (load_count[1 + BP_PORT(bp)] == 1)
5253 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
5254 else
5255 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
5256 }
5257
34f80b04
EG
5258 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
5259 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
5260 bp->port.pmf = 1;
5261 else
5262 bp->port.pmf = 0;
5263 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
5264
5265 /* if we can't use MSI-X we only need one fp,
5266 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
5267 * and fall back to INT#A with one fp
5268 */
34f80b04
EG
5269 if (use_inta) {
5270 bp->num_queues = 1;
5271
5272 } else {
5273 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
5274 /* user requested number */
5275 bp->num_queues = use_multi;
5276
5277 else if (use_multi)
5278 bp->num_queues = min_t(u32, num_online_cpus(),
5279 BP_MAX_QUEUES(bp));
5280 else
a2fbb9ea 5281 bp->num_queues = 1;
34f80b04
EG
5282
5283 if (bnx2x_enable_msix(bp)) {
5284 /* failed to enable MSI-X */
5285 bp->num_queues = 1;
5286 if (use_multi)
5287 BNX2X_ERR("Multi requested but failed"
5288 " to enable MSI-X\n");
a2fbb9ea
ET
5289 }
5290 }
34f80b04
EG
5291 DP(NETIF_MSG_IFUP,
5292 "set number of queues to %d\n", bp->num_queues);
c14423fe 5293
a2fbb9ea
ET
5294 if (bnx2x_alloc_mem(bp))
5295 return -ENOMEM;
5296
34f80b04
EG
5297 /* Disable interrupt handling until HW is initialized */
5298 atomic_set(&bp->intr_sem, 1);
a2fbb9ea 5299
34f80b04
EG
5300 if (bp->flags & USING_MSIX_FLAG) {
5301 rc = bnx2x_req_msix_irqs(bp);
5302 if (rc) {
5303 pci_disable_msix(bp->pdev);
5304 goto load_error;
5305 }
5306 } else {
5307 bnx2x_ack_int(bp);
5308 rc = bnx2x_req_irq(bp);
5309 if (rc) {
5310 BNX2X_ERR("IRQ request failed, aborting\n");
5311 goto load_error;
a2fbb9ea
ET
5312 }
5313 }
5314
5315 for_each_queue(bp, i)
5316 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
5317 bnx2x_poll, 128);
5318
a2fbb9ea 5319 /* Initialize HW */
34f80b04
EG
5320 rc = bnx2x_init_hw(bp, load_code);
5321 if (rc) {
a2fbb9ea 5322 BNX2X_ERR("HW init failed, aborting\n");
228241eb 5323 goto load_error;
a2fbb9ea
ET
5324 }
5325
34f80b04 5326 /* Enable interrupt handling */
a2fbb9ea
ET
5327 atomic_set(&bp->intr_sem, 0);
5328
a2fbb9ea
ET
5329 /* Setup NIC internals and enable interrupts */
5330 bnx2x_nic_init(bp);
5331
5332 /* Send LOAD_DONE command to MCP */
34f80b04 5333 if (!BP_NOMCP(bp)) {
228241eb
ET
5334 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
5335 if (!load_code) {
a2fbb9ea 5336 BNX2X_ERR("MCP response failure, unloading\n");
34f80b04 5337 rc = -EBUSY;
228241eb 5338 goto load_int_disable;
a2fbb9ea
ET
5339 }
5340 }
5341
5342 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
5343
5344 /* Enable Rx interrupt handling before sending the ramrod
5345 as it's completed on Rx FP queue */
5346 for_each_queue(bp, i)
5347 napi_enable(&bnx2x_fp(bp, i, napi));
5348
34f80b04
EG
5349 rc = bnx2x_setup_leading(bp);
5350 if (rc) {
5351#ifdef BNX2X_STOP_ON_ERROR
5352 bp->panic = 1;
5353#endif
228241eb 5354 goto load_stop_netif;
34f80b04 5355 }
a2fbb9ea 5356
34f80b04
EG
5357 if (CHIP_IS_E1H(bp))
5358 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
5359 BNX2X_ERR("!!! mf_cfg function disabled\n");
5360 bp->state = BNX2X_STATE_DISABLED;
5361 }
a2fbb9ea 5362
34f80b04
EG
5363 if (bp->state == BNX2X_STATE_OPEN)
5364 for_each_nondefault_queue(bp, i) {
5365 rc = bnx2x_setup_multi(bp, i);
5366 if (rc)
5367 goto load_stop_netif;
5368 }
a2fbb9ea 5369
34f80b04
EG
5370 if (CHIP_IS_E1(bp))
5371 bnx2x_set_mac_addr_e1(bp);
5372 else
5373 bnx2x_set_mac_addr_e1h(bp);
5374
5375 if (bp->port.pmf)
5376 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
5377
5378 /* Start fast path */
34f80b04
EG
5379 switch (load_mode) {
5380 case LOAD_NORMAL:
5381 /* Tx queue should only be re-enabled */
5382 netif_wake_queue(bp->dev);
5383 bnx2x_set_rx_mode(bp->dev);
5384 break;
5385
5386 case LOAD_OPEN:
5387 /* IRQ is only requested from bnx2x_open */
a2fbb9ea 5388 netif_start_queue(bp->dev);
34f80b04 5389 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
5390 if (bp->flags & USING_MSIX_FLAG)
5391 printk(KERN_INFO PFX "%s: using MSI-X\n",
5392 bp->dev->name);
34f80b04 5393 break;
a2fbb9ea 5394
34f80b04 5395 case LOAD_DIAG:
a2fbb9ea 5396 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
5397 bp->state = BNX2X_STATE_DIAG;
5398 break;
5399
5400 default:
5401 break;
a2fbb9ea
ET
5402 }
5403
34f80b04
EG
5404 if (!bp->port.pmf)
5405 bnx2x__link_status_update(bp);
5406
a2fbb9ea
ET
5407 /* start the timer */
5408 mod_timer(&bp->timer, jiffies + bp->current_interval);
5409
34f80b04 5410
a2fbb9ea
ET
5411 return 0;
5412
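/* Error unwind: each label below releases what was acquired after
 * the previous one - the NAPI instances, then the interrupts/IRQs,
 * then the driver memory. */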
228241eb 5413load_stop_netif:
a2fbb9ea
ET
5414 for_each_queue(bp, i)
5415 napi_disable(&bnx2x_fp(bp, i, napi));
5416
228241eb 5417load_int_disable:
615f8fd9 5418 bnx2x_int_disable_sync(bp);
a2fbb9ea 5419
34f80b04 5420 /* Release IRQs */
a2fbb9ea
ET
5421 bnx2x_free_irq(bp);
5422
228241eb 5423load_error:
a2fbb9ea
ET
5424 bnx2x_free_mem(bp);
5425
5426 /* TBD we really need to reset the chip
5427 if we want to recover from this */
34f80b04 5428 return rc;
a2fbb9ea
ET
5429}
5430
5431static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5432{
a2fbb9ea
ET
5433 int rc;
5434
c14423fe 5435 /* halt the connection */
a2fbb9ea
ET
5436 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
5437 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
5438
34f80b04 5439 /* Wait for completion */
a2fbb9ea 5440 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 5441 &(bp->fp[index].state), 1);
c14423fe 5442 if (rc) /* timeout */
a2fbb9ea
ET
5443 return rc;
5444
5445 /* delete cfc entry */
5446 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5447
34f80b04
EG
5448 /* Wait for completion */
5449 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5450 &(bp->fp[index].state), 1);
5451 return rc;
a2fbb9ea
ET
5452}
5453
a2fbb9ea
ET
5454static void bnx2x_stop_leading(struct bnx2x *bp)
5455{
49d66772 5456 u16 dsb_sp_prod_idx;
c14423fe 5457 /* if the other port is handling traffic,
a2fbb9ea 5458 this can take a lot of time */
34f80b04
EG
5459 int cnt = 500;
5460 int rc;
a2fbb9ea
ET
5461
5462 might_sleep();
5463
5464 /* Send HALT ramrod */
5465 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 5466 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 5467
34f80b04
EG
5468 /* Wait for completion */
5469 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5470 &(bp->fp[0].state), 1);
5471 if (rc) /* timeout */
a2fbb9ea
ET
5472 return;
5473
49d66772 5474 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 5475
228241eb 5476 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
5477 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5478
49d66772 5479 /* Wait for the completion to arrive on the default status block;
a2fbb9ea
ET
5480 we are going to reset the chip anyway,
5481 so there is not much to do if this times out
5482 */
34f80b04 5483 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 5484 msleep(1);
34f80b04
EG
5485 if (!cnt) {
5486 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5487 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5488 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5489#ifdef BNX2X_STOP_ON_ERROR
5490 bnx2x_panic();
5491#endif
5492 break;
5493 }
5494 cnt--;
49d66772
ET
5495 }
5496 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5497 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
5498}
5499
34f80b04
EG
5500static void bnx2x_reset_func(struct bnx2x *bp)
5501{
5502 int port = BP_PORT(bp);
5503 int func = BP_FUNC(bp);
5504 int base, i;
5505
5506 /* Configure IGU */
5507 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5508 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5509
5510 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
5511
5512 /* Clear ILT */
5513 base = FUNC_ILT_BASE(func);
5514 for (i = base; i < base + ILT_PER_FUNC; i++)
5515 bnx2x_ilt_wr(bp, i, 0);
5516}
5517
5518static void bnx2x_reset_port(struct bnx2x *bp)
5519{
5520 int port = BP_PORT(bp);
5521 u32 val;
5522
5523 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5524
5525 /* Do not rcv packets to BRB */
5526 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5527 /* Do not direct rcv packets that are not for MCP to the BRB */
5528 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5529 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5530
5531 /* Configure AEU */
5532 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5533
5534 msleep(100);
5535 /* Check for BRB port occupancy */
5536 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5537 if (val)
5538 DP(NETIF_MSG_IFDOWN,
5539 "BRB1 is not empty %d blooks are occupied\n", val);
5540
5541 /* TODO: Close Doorbell port? */
5542}
5543
5544static void bnx2x_reset_common(struct bnx2x *bp)
5545{
5546 /* reset_common */
5547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5548 0xd3ffff7f);
5549 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5550}
5551
5552static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5553{
5554 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5555 BP_FUNC(bp), reset_code);
5556
5557 switch (reset_code) {
5558 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5559 bnx2x_reset_port(bp);
5560 bnx2x_reset_func(bp);
5561 bnx2x_reset_common(bp);
5562 break;
5563
5564 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5565 bnx2x_reset_port(bp);
5566 bnx2x_reset_func(bp);
5567 break;
5568
5569 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5570 bnx2x_reset_func(bp);
5571 break;
49d66772 5572
34f80b04
EG
5573 default:
5574 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5575 break;
5576 }
5577}
5578
5579/* must be called with rtnl_lock */
5580static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea
ET
5581{
5582 u32 reset_code = 0;
34f80b04 5583 int i, cnt;
a2fbb9ea
ET
5584
5585 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
5586
228241eb
ET
5587 bp->rx_mode = BNX2X_RX_MODE_NONE;
5588 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 5589
228241eb
ET
5590 if (netif_running(bp->dev)) {
5591 netif_tx_disable(bp->dev);
5592 bp->dev->trans_start = jiffies; /* prevent tx timeout */
5593 }
5594
34f80b04
EG
5595 del_timer_sync(&bp->timer);
5596 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
5597 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
5598
228241eb
ET
5599 /* Wait until all fast path tasks complete */
5600 for_each_queue(bp, i) {
5601 struct bnx2x_fastpath *fp = &bp->fp[i];
5602
34f80b04
EG
5603#ifdef BNX2X_STOP_ON_ERROR
5604#ifdef __powerpc64__
5605 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%lx\n",
5606#else
5607 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
5608#endif
5609 fp->tpa_queue_used);
5610#endif
5611 cnt = 1000;
5612 smp_rmb();
5613 while (bnx2x_has_work(fp)) {
228241eb 5614 msleep(1);
34f80b04
EG
5615 if (!cnt) {
5616 BNX2X_ERR("timeout waiting for queue[%d]\n",
5617 i);
5618#ifdef BNX2X_STOP_ON_ERROR
5619 bnx2x_panic();
5620 return -EBUSY;
5621#else
5622 break;
5623#endif
5624 }
5625 cnt--;
5626 smp_rmb();
5627 }
228241eb 5628 }
a2fbb9ea 5629
34f80b04
EG
5630 /* Wait until all slow path tasks complete */
5631 cnt = 1000;
5632 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
a2fbb9ea
ET
5633 msleep(1);
5634
228241eb
ET
5635 for_each_queue(bp, i)
5636 napi_disable(&bnx2x_fp(bp, i, napi));
5637 /* Disable interrupts after Tx and Rx are disabled on stack level */
5638 bnx2x_int_disable_sync(bp);
a2fbb9ea 5639
34f80b04
EG
5640 /* Release IRQs */
5641 bnx2x_free_irq(bp);
5642
a2fbb9ea
ET
5643 if (bp->flags & NO_WOL_FLAG)
5644 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
228241eb 5645
a2fbb9ea 5646 else if (bp->wol) {
34f80b04 5647 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 5648 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 5649 u32 val;
a2fbb9ea 5650
34f80b04
EG
5651 /* The mac address is written to entries 1-4 to
5652 preserve entry 0 which is used by the PMF */
a2fbb9ea 5653 val = (mac_addr[0] << 8) | mac_addr[1];
34f80b04 5654 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
a2fbb9ea
ET
5655
5656 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5657 (mac_addr[4] << 8) | mac_addr[5];
34f80b04
EG
5658 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
5659 val);
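		/* each MAC_MATCH CAM entry is 8 bytes wide: the two most
		 * significant MAC bytes go into the first dword, the
		 * remaining four into the second */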
a2fbb9ea
ET
5660
5661 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 5662
a2fbb9ea
ET
5663 } else
5664 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5665
34f80b04
EG
5666 /* Close the multi and leading connections;
5667 ramrod completions are collected synchronously */
a2fbb9ea
ET
5668 for_each_nondefault_queue(bp, i)
5669 if (bnx2x_stop_multi(bp, i))
228241eb 5670 goto unload_error;
a2fbb9ea 5671
34f80b04
EG
5672 if (CHIP_IS_E1H(bp))
5673 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
5674
5675 bnx2x_stop_leading(bp);
5676#ifdef BNX2X_STOP_ON_ERROR
5677 /* If ramrod completion timed out - break here! */
5678 if (bp->panic) {
5679 BNX2X_ERR("Stop leading failed!\n");
5680 return -EBUSY;
5681 }
5682#endif
5683
228241eb
ET
5684 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
5685 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
34f80b04
EG
5686 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
5687 "state 0x%x fp[0].state 0x%x\n",
228241eb
ET
5688 bp->state, bp->fp[0].state);
5689 }
5690
5691unload_error:
34f80b04 5692 if (!BP_NOMCP(bp))
228241eb 5693 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5694 else {
5695 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
5696 load_count[0], load_count[1], load_count[2]);
5697 load_count[0]--;
5698 load_count[1 + BP_PORT(bp)]--;
5699 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
5700 load_count[0], load_count[1], load_count[2]);
5701 if (load_count[0] == 0)
5702 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5703 else if (load_count[1 + BP_PORT(bp)] == 0)
5704 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5705 else
5706 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5707 }
a2fbb9ea 5708
34f80b04
EG
5709 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5710 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5711 bnx2x__link_reset(bp);
a2fbb9ea
ET
5712
5713 /* Reset the chip */
228241eb 5714 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
5715
5716 /* Report UNLOAD_DONE to MCP */
34f80b04 5717 if (!BP_NOMCP(bp))
a2fbb9ea
ET
5718 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5719
5720 /* Free SKBs and driver internals */
5721 bnx2x_free_skbs(bp);
5722 bnx2x_free_mem(bp);
5723
5724 bp->state = BNX2X_STATE_CLOSED;
228241eb 5725
a2fbb9ea
ET
5726 netif_carrier_off(bp->dev);
5727
5728 return 0;
5729}
5730
34f80b04
EG
5731static void bnx2x_reset_task(struct work_struct *work)
5732{
5733 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
5734
5735#ifdef BNX2X_STOP_ON_ERROR
5736 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5737 " so reset not done to allow debug dump,\n"
5738 KERN_ERR " you will need to reboot when done\n");
5739 return;
5740#endif
5741
5742 rtnl_lock();
5743
5744 if (!netif_running(bp->dev))
5745 goto reset_task_exit;
5746
5747 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5748 bnx2x_nic_load(bp, LOAD_NORMAL);
5749
5750reset_task_exit:
5751 rtnl_unlock();
5752}
5753
a2fbb9ea
ET
5754/* end of nic load/unload */
5755
5756/* ethtool_ops */
5757
5758/*
5759 * Init service functions
5760 */
5761
34f80b04
EG
5762static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5763{
5764 u32 val;
5765
5766 /* Check if there is any driver already loaded */
5767 val = REG_RD(bp, MISC_REG_UNPREPARED);
5768 if (val == 0x1) {
5769 /* Check if it is the UNDI driver
5770 * UNDI driver initializes CID offset for normal bell to 0x7
5771 */
5772 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5773 if (val == 0x7) {
5774 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5775 /* save our func and fw_seq */
5776 int func = BP_FUNC(bp);
5777 u16 fw_seq = bp->fw_seq;
5778
5779 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5780
5781 /* try to unload UNDI on port 0 */
5782 bp->func = 0;
5783 bp->fw_seq = (SHMEM_RD(bp,
5784 func_mb[bp->func].drv_mb_header) &
5785 DRV_MSG_SEQ_NUMBER_MASK);
5786
5787 reset_code = bnx2x_fw_command(bp, reset_code);
5788 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5789
5790 /* if UNDI is loaded on the other port */
5791 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5792
5793 bp->func = 1;
5794 bp->fw_seq = (SHMEM_RD(bp,
5795 func_mb[bp->func].drv_mb_header) &
5796 DRV_MSG_SEQ_NUMBER_MASK);
5797
5798 bnx2x_fw_command(bp,
5799 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
5800 bnx2x_fw_command(bp,
5801 DRV_MSG_CODE_UNLOAD_DONE);
5802
5803 /* restore our func and fw_seq */
5804 bp->func = func;
5805 bp->fw_seq = fw_seq;
5806 }
5807
5808 /* reset device */
5809 REG_WR(bp,
5810 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5811 0xd3ffff7f);
5812 REG_WR(bp,
5813 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5814 0x1403);
5815 }
5816 }
5817}
5818
5819static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5820{
5821 u32 val, val2, val3, val4, id;
5822
5823 /* Get the chip revision id and number. */
5824 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5825 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5826 id = ((val & 0xffff) << 16);
5827 val = REG_RD(bp, MISC_REG_CHIP_REV);
5828 id |= ((val & 0xf) << 12);
5829 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5830 id |= ((val & 0xff) << 4);
5831 val = REG_RD(bp, MISC_REG_BOND_ID);
5832 id |= (val & 0xf);
5833 bp->common.chip_id = id;
5834 bp->link_params.chip_id = bp->common.chip_id;
5835 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
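	/* Example (hypothetical register values): CHIP_NUM 0x164e with
	 * REV, METAL and BOND_ID all zero assembles to chip_id 0x164e0000.
	 */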
5836
5837 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5838 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5839 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5840 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5841 bp->common.flash_size, bp->common.flash_size);
5842
5843 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5844 bp->link_params.shmem_base = bp->common.shmem_base;
5845 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
5846
5847 if (!bp->common.shmem_base ||
5848 (bp->common.shmem_base < 0xA0000) ||
5849 (bp->common.shmem_base >= 0xC0000)) {
5850 BNX2X_DEV_INFO("MCP not active\n");
5851 bp->flags |= NO_MCP_FLAG;
5852 return;
5853 }
5854
5855 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5856 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5857 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5858 BNX2X_ERR("BAD MCP validity signature\n");
5859
5860 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5861 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
5862
5863 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
5864 bp->common.hw_config, bp->common.board);
5865
5866 bp->link_params.hw_led_mode = ((bp->common.hw_config &
5867 SHARED_HW_CFG_LED_MODE_MASK) >>
5868 SHARED_HW_CFG_LED_MODE_SHIFT);
5869
5870 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
5871 bp->common.bc_ver = val;
5872 BNX2X_DEV_INFO("bc_ver %X\n", val);
5873 if (val < BNX2X_BC_VER) {
5874 /* for now only warn
5875 * later we might need to enforce this */
5876 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
5877 " please upgrade BC\n", BNX2X_BC_VER, val);
5878 }
5879 BNX2X_DEV_INFO("%sWoL Capable\n",
5880 (bp->flags & NO_WOL_FLAG)? "Not " : "");
5881
5882 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
5883 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
5884 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
5885 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
5886
5887 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
5888 val, val2, val3, val4);
5889}
5890
5891static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
5892 u32 switch_cfg)
a2fbb9ea 5893{
34f80b04 5894 int port = BP_PORT(bp);
a2fbb9ea
ET
5895 u32 ext_phy_type;
5896
a2fbb9ea
ET
5897 switch (switch_cfg) {
5898 case SWITCH_CFG_1G:
5899 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
5900
c18487ee
YR
5901 ext_phy_type =
5902 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
5903 switch (ext_phy_type) {
5904 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
5905 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
5906 ext_phy_type);
5907
34f80b04
EG
5908 bp->port.supported |= (SUPPORTED_10baseT_Half |
5909 SUPPORTED_10baseT_Full |
5910 SUPPORTED_100baseT_Half |
5911 SUPPORTED_100baseT_Full |
5912 SUPPORTED_1000baseT_Full |
5913 SUPPORTED_2500baseX_Full |
5914 SUPPORTED_TP |
5915 SUPPORTED_FIBRE |
5916 SUPPORTED_Autoneg |
5917 SUPPORTED_Pause |
5918 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
5919 break;
5920
5921 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
5922 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
5923 ext_phy_type);
5924
34f80b04
EG
5925 bp->port.supported |= (SUPPORTED_10baseT_Half |
5926 SUPPORTED_10baseT_Full |
5927 SUPPORTED_100baseT_Half |
5928 SUPPORTED_100baseT_Full |
5929 SUPPORTED_1000baseT_Full |
5930 SUPPORTED_TP |
5931 SUPPORTED_FIBRE |
5932 SUPPORTED_Autoneg |
5933 SUPPORTED_Pause |
5934 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
5935 break;
5936
5937 default:
5938 BNX2X_ERR("NVRAM config error. "
5939 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 5940 bp->link_params.ext_phy_config);
a2fbb9ea
ET
5941 return;
5942 }
5943
34f80b04
EG
5944 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
5945 port*0x10);
5946 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
5947 break;
5948
5949 case SWITCH_CFG_10G:
5950 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
5951
c18487ee
YR
5952 ext_phy_type =
5953 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
5954 switch (ext_phy_type) {
5955 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5956 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
5957 ext_phy_type);
5958
34f80b04
EG
5959 bp->port.supported |= (SUPPORTED_10baseT_Half |
5960 SUPPORTED_10baseT_Full |
5961 SUPPORTED_100baseT_Half |
5962 SUPPORTED_100baseT_Full |
5963 SUPPORTED_1000baseT_Full |
5964 SUPPORTED_2500baseX_Full |
5965 SUPPORTED_10000baseT_Full |
5966 SUPPORTED_TP |
5967 SUPPORTED_FIBRE |
5968 SUPPORTED_Autoneg |
5969 SUPPORTED_Pause |
5970 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
5971 break;
5972
5973 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 5974 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 5975 ext_phy_type);
f1410647 5976
34f80b04
EG
5977 bp->port.supported |= (SUPPORTED_10000baseT_Full |
5978 SUPPORTED_FIBRE |
5979 SUPPORTED_Pause |
5980 SUPPORTED_Asym_Pause);
f1410647
ET
5981 break;
5982
a2fbb9ea 5983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
5984 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
5985 ext_phy_type);
5986
34f80b04
EG
5987 bp->port.supported |= (SUPPORTED_10000baseT_Full |
5988 SUPPORTED_1000baseT_Full |
5989 SUPPORTED_FIBRE |
5990 SUPPORTED_Pause |
5991 SUPPORTED_Asym_Pause);
f1410647
ET
5992 break;
5993
5994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5995 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
5996 ext_phy_type);
5997
34f80b04
EG
5998 bp->port.supported |= (SUPPORTED_10000baseT_Full |
5999 SUPPORTED_1000baseT_Full |
6000 SUPPORTED_FIBRE |
6001 SUPPORTED_Autoneg |
6002 SUPPORTED_Pause |
6003 SUPPORTED_Asym_Pause);
f1410647
ET
6004 break;
6005
c18487ee
YR
6006 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6007 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6008 ext_phy_type);
6009
34f80b04
EG
6010 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6011 SUPPORTED_2500baseX_Full |
6012 SUPPORTED_1000baseT_Full |
6013 SUPPORTED_FIBRE |
6014 SUPPORTED_Autoneg |
6015 SUPPORTED_Pause |
6016 SUPPORTED_Asym_Pause);
c18487ee
YR
6017 break;
6018
f1410647
ET
6019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6020 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6021 ext_phy_type);
6022
34f80b04
EG
6023 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6024 SUPPORTED_TP |
6025 SUPPORTED_Autoneg |
6026 SUPPORTED_Pause |
6027 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6028 break;
6029
c18487ee
YR
6030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6031 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6032 bp->link_params.ext_phy_config);
6033 break;
6034
a2fbb9ea
ET
6035 default:
6036 BNX2X_ERR("NVRAM config error. "
6037 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 6038 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6039 return;
6040 }
6041
34f80b04
EG
6042 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6043 port*0x18);
6044 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 6045
a2fbb9ea
ET
6046 break;
6047
6048 default:
6049 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 6050 bp->port.link_config);
a2fbb9ea
ET
6051 return;
6052 }
34f80b04 6053 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
6054
6055 /* mask what we support according to speed_cap_mask */
c18487ee
YR
6056 if (!(bp->link_params.speed_cap_mask &
6057 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 6058 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 6059
c18487ee
YR
6060 if (!(bp->link_params.speed_cap_mask &
6061 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 6062 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 6063
c18487ee
YR
6064 if (!(bp->link_params.speed_cap_mask &
6065 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 6066 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 6067
c18487ee
YR
6068 if (!(bp->link_params.speed_cap_mask &
6069 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 6070 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 6071
c18487ee
YR
6072 if (!(bp->link_params.speed_cap_mask &
6073 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
6074 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6075 SUPPORTED_1000baseT_Full);
a2fbb9ea 6076
c18487ee
YR
6077 if (!(bp->link_params.speed_cap_mask &
6078 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 6079 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 6080
c18487ee
YR
6081 if (!(bp->link_params.speed_cap_mask &
6082 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 6083 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 6084
34f80b04 6085 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
6086}
6087
34f80b04 6088static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6089{
c18487ee 6090 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 6091
34f80b04 6092 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6093 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 6094 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 6095 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6096 bp->port.advertising = bp->port.supported;
a2fbb9ea 6097 } else {
c18487ee
YR
6098 u32 ext_phy_type =
6099 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6100
6101 if ((ext_phy_type ==
6102 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6103 (ext_phy_type ==
6104 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 6105 /* force 10G, no AN */
c18487ee 6106 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 6107 bp->port.advertising =
a2fbb9ea
ET
6108 (ADVERTISED_10000baseT_Full |
6109 ADVERTISED_FIBRE);
6110 break;
6111 }
6112 BNX2X_ERR("NVRAM config error. "
6113 "Invalid link_config 0x%x"
6114 " Autoneg not supported\n",
34f80b04 6115 bp->port.link_config);
a2fbb9ea
ET
6116 return;
6117 }
6118 break;
6119
6120 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 6121 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 6122 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
6123 bp->port.advertising = (ADVERTISED_10baseT_Full |
6124 ADVERTISED_TP);
a2fbb9ea
ET
6125 } else {
6126 BNX2X_ERR("NVRAM config error. "
6127 "Invalid link_config 0x%x"
6128 " speed_cap_mask 0x%x\n",
34f80b04 6129 bp->port.link_config,
c18487ee 6130 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6131 return;
6132 }
6133 break;
6134
6135 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 6136 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
6137 bp->link_params.req_line_speed = SPEED_10;
6138 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6139 bp->port.advertising = (ADVERTISED_10baseT_Half |
6140 ADVERTISED_TP);
a2fbb9ea
ET
6141 } else {
6142 BNX2X_ERR("NVRAM config error. "
6143 "Invalid link_config 0x%x"
6144 " speed_cap_mask 0x%x\n",
34f80b04 6145 bp->port.link_config,
c18487ee 6146 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6147 return;
6148 }
6149 break;
6150
6151 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 6152 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 6153 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
6154 bp->port.advertising = (ADVERTISED_100baseT_Full |
6155 ADVERTISED_TP);
a2fbb9ea
ET
6156 } else {
6157 BNX2X_ERR("NVRAM config error. "
6158 "Invalid link_config 0x%x"
6159 " speed_cap_mask 0x%x\n",
34f80b04 6160 bp->port.link_config,
c18487ee 6161 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6162 return;
6163 }
6164 break;
6165
6166 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 6167 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
6168 bp->link_params.req_line_speed = SPEED_100;
6169 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6170 bp->port.advertising = (ADVERTISED_100baseT_Half |
6171 ADVERTISED_TP);
a2fbb9ea
ET
6172 } else {
6173 BNX2X_ERR("NVRAM config error. "
6174 "Invalid link_config 0x%x"
6175 " speed_cap_mask 0x%x\n",
34f80b04 6176 bp->port.link_config,
c18487ee 6177 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6178 return;
6179 }
6180 break;
6181
6182 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 6183 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 6184 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
6185 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6186 ADVERTISED_TP);
a2fbb9ea
ET
6187 } else {
6188 BNX2X_ERR("NVRAM config error. "
6189 "Invalid link_config 0x%x"
6190 " speed_cap_mask 0x%x\n",
34f80b04 6191 bp->port.link_config,
c18487ee 6192 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6193 return;
6194 }
6195 break;
6196
6197 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 6198 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 6199 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
6200 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6201 ADVERTISED_TP);
a2fbb9ea
ET
6202 } else {
6203 BNX2X_ERR("NVRAM config error. "
6204 "Invalid link_config 0x%x"
6205 " speed_cap_mask 0x%x\n",
34f80b04 6206 bp->port.link_config,
c18487ee 6207 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6208 return;
6209 }
6210 break;
6211
6212 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6213 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6214 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 6215 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 6216 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
6217 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6218 ADVERTISED_FIBRE);
a2fbb9ea
ET
6219 } else {
6220 BNX2X_ERR("NVRAM config error. "
6221 "Invalid link_config 0x%x"
6222 " speed_cap_mask 0x%x\n",
34f80b04 6223 bp->port.link_config,
c18487ee 6224 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6225 return;
6226 }
6227 break;
6228
6229 default:
6230 BNX2X_ERR("NVRAM config error. "
6231 "BAD link speed link_config 0x%x\n",
34f80b04 6232 bp->port.link_config);
c18487ee 6233 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6234 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
6235 break;
6236 }
a2fbb9ea 6237
34f80b04
EG
6238 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6239 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 6240 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
34f80b04 6241 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 6242 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 6243
c18487ee 6244 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 6245 " advertising 0x%x\n",
c18487ee
YR
6246 bp->link_params.req_line_speed,
6247 bp->link_params.req_duplex,
34f80b04 6248 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
6249}
6250
34f80b04 6251static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6252{
34f80b04
EG
6253 int port = BP_PORT(bp);
6254 u32 val, val2;
a2fbb9ea 6255
c18487ee 6256 bp->link_params.bp = bp;
34f80b04 6257 bp->link_params.port = port;
c18487ee 6258
c18487ee 6259 bp->link_params.serdes_config =
f1410647 6260 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 6261 bp->link_params.lane_config =
a2fbb9ea 6262 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 6263 bp->link_params.ext_phy_config =
a2fbb9ea
ET
6264 SHMEM_RD(bp,
6265 dev_info.port_hw_config[port].external_phy_config);
c18487ee 6266 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
6267 SHMEM_RD(bp,
6268 dev_info.port_hw_config[port].speed_capability_mask);
6269
34f80b04 6270 bp->port.link_config =
a2fbb9ea
ET
6271 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6272
34f80b04
EG
6273 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
6274 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
6275 " link_config 0x%08x\n",
c18487ee
YR
6276 bp->link_params.serdes_config,
6277 bp->link_params.lane_config,
6278 bp->link_params.ext_phy_config,
34f80b04 6279 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 6280
34f80b04 6281 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
6282 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6283 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6284
6285 bnx2x_link_settings_requested(bp);
6286
6287 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6288 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6289 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6290 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6291 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6292 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6293 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6294 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
6295 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6296 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
6297}
6298
6299static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6300{
6301 int func = BP_FUNC(bp);
6302 u32 val, val2;
6303 int rc = 0;
a2fbb9ea 6304
34f80b04 6305 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 6306
34f80b04
EG
6307 bp->e1hov = 0;
6308 bp->e1hmf = 0;
6309 if (CHIP_IS_E1H(bp)) {
6310 bp->mf_config =
6311 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 6312
34f80b04
EG
6313 val =
6314 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
6315 FUNC_MF_CFG_E1HOV_TAG_MASK);
6316 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 6317
34f80b04
EG
6318 bp->e1hov = val;
6319 bp->e1hmf = 1;
6320 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
6321 "(0x%04x)\n",
6322 func, bp->e1hov, bp->e1hov);
6323 } else {
6324 BNX2X_DEV_INFO("Single function mode\n");
6325 if (BP_E1HVN(bp)) {
6326 BNX2X_ERR("!!! No valid E1HOV for func %d,"
6327 " aborting\n", func);
6328 rc = -EPERM;
6329 }
6330 }
6331 }
a2fbb9ea 6332
34f80b04
EG
6333 if (!BP_NOMCP(bp)) {
6334 bnx2x_get_port_hwinfo(bp);
6335
6336 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6337 DRV_MSG_SEQ_NUMBER_MASK);
6338 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6339 }
6340
6341 if (IS_E1HMF(bp)) {
6342 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6343 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6344 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6345 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6346 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6347 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6348 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6349 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6350 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6351 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6352 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6353 ETH_ALEN);
6354 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6355 ETH_ALEN);
a2fbb9ea 6356 }
34f80b04
EG
6357
6358 return rc;
a2fbb9ea
ET
6359 }
6360
34f80b04
EG
6361 if (BP_NOMCP(bp)) {
6362 /* only supposed to happen on emulation/FPGA */
6363 BNX2X_ERR("warning rendom MAC workaround active\n");
6364 random_ether_addr(bp->dev->dev_addr);
6365 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6366 }
a2fbb9ea 6367
34f80b04
EG
6368 return rc;
6369}
6370
6371static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6372{
6373 int func = BP_FUNC(bp);
6374 int rc;
6375
6376 if (nomcp)
6377 bp->flags |= NO_MCP_FLAG;
a2fbb9ea 6378
34f80b04 6379 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 6380
34f80b04
EG
6381 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
6382 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
6383
6384 rc = bnx2x_get_hwinfo(bp);
6385
6386 /* need to reset chip if undi was active */
6387 if (!BP_NOMCP(bp))
6388 bnx2x_undi_unload(bp);
6389
6390 if (CHIP_REV_IS_FPGA(bp))
6391 printk(KERN_ERR PFX "FPGA detected\n");
6392
6393 if (BP_NOMCP(bp) && (func == 0))
6394 printk(KERN_ERR PFX
6395 "MCP disabled, must load devices in order!\n");
6396
6397 bp->tx_ring_size = MAX_TX_AVAIL;
6398 bp->rx_ring_size = MAX_RX_AVAIL;
6399
6400 bp->rx_csum = 1;
6401 bp->rx_offset = 0;
6402
6403 bp->tx_ticks = 50;
6404 bp->rx_ticks = 25;
6405
6406 bp->stats_ticks = 1000000 & 0xffff00;
6407
6408 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6409 bp->current_interval = (poll ? poll : bp->timer_interval);
6410
6411 init_timer(&bp->timer);
6412 bp->timer.expires = jiffies + bp->current_interval;
6413 bp->timer.data = (unsigned long) bp;
6414 bp->timer.function = bnx2x_timer;
6415
6416 return rc;
a2fbb9ea
ET
6417}
6418
6419/*
6420 * ethtool service functions
6421 */
6422
6423/* All ethtool functions called with rtnl_lock */
6424
6425static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6426{
6427 struct bnx2x *bp = netdev_priv(dev);
6428
34f80b04
EG
6429 cmd->supported = bp->port.supported;
6430 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
6431
6432 if (netif_carrier_ok(dev)) {
c18487ee
YR
6433 cmd->speed = bp->link_vars.line_speed;
6434 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 6435 } else {
c18487ee
YR
6436 cmd->speed = bp->link_params.req_line_speed;
6437 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 6438 }
34f80b04
EG
6439 if (IS_E1HMF(bp)) {
6440 u16 vn_max_rate;
6441
6442 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
6443 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
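		/* the MAX_BW field is in units of 100 Mbps, so a
		 * (hypothetical) field value of 100 caps the reported
		 * speed at 10000 Mbps */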
6444 if (vn_max_rate < cmd->speed)
6445 cmd->speed = vn_max_rate;
6446 }
a2fbb9ea 6447
c18487ee
YR
6448 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
6449 u32 ext_phy_type =
6450 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
6451
6452 switch (ext_phy_type) {
6453 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6454 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6455 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 6457 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
6458 cmd->port = PORT_FIBRE;
6459 break;
6460
6461 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6462 cmd->port = PORT_TP;
6463 break;
6464
c18487ee
YR
6465 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6466 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6467 bp->link_params.ext_phy_config);
6468 break;
6469
f1410647
ET
6470 default:
6471 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
6472 bp->link_params.ext_phy_config);
6473 break;
f1410647
ET
6474 }
6475 } else
a2fbb9ea 6476 cmd->port = PORT_TP;
a2fbb9ea 6477
34f80b04 6478 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
6479 cmd->transceiver = XCVR_INTERNAL;
6480
c18487ee 6481 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 6482 cmd->autoneg = AUTONEG_ENABLE;
f1410647 6483 else
a2fbb9ea 6484 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
6485
6486 cmd->maxtxpkt = 0;
6487 cmd->maxrxpkt = 0;
6488
6489 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
6490 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
6491 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
6492 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
6493 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
6494 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
6495 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
6496
6497 return 0;
6498}
6499
6500static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6501{
6502 struct bnx2x *bp = netdev_priv(dev);
6503 u32 advertising;
6504
34f80b04
EG
6505 if (IS_E1HMF(bp))
6506 return 0;
6507
a2fbb9ea
ET
6508 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
6509 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
6510 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
6511 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
6512 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
6513 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
6514 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
6515
a2fbb9ea 6516 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
6517 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
6518 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 6519 return -EINVAL;
f1410647 6520 }
a2fbb9ea
ET
6521
6522 /* advertise the requested speed and duplex if supported */
34f80b04 6523 cmd->advertising &= bp->port.supported;
a2fbb9ea 6524
c18487ee
YR
6525 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6526 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
6527 bp->port.advertising |= (ADVERTISED_Autoneg |
6528 cmd->advertising);
a2fbb9ea
ET
6529
6530 } else { /* forced speed */
6531 /* advertise the requested speed and duplex if supported */
6532 switch (cmd->speed) {
6533 case SPEED_10:
6534 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 6535 if (!(bp->port.supported &
f1410647
ET
6536 SUPPORTED_10baseT_Full)) {
6537 DP(NETIF_MSG_LINK,
6538 "10M full not supported\n");
a2fbb9ea 6539 return -EINVAL;
f1410647 6540 }
a2fbb9ea
ET
6541
6542 advertising = (ADVERTISED_10baseT_Full |
6543 ADVERTISED_TP);
6544 } else {
34f80b04 6545 if (!(bp->port.supported &
f1410647
ET
6546 SUPPORTED_10baseT_Half)) {
6547 DP(NETIF_MSG_LINK,
6548 "10M half not supported\n");
a2fbb9ea 6549 return -EINVAL;
f1410647 6550 }
a2fbb9ea
ET
6551
6552 advertising = (ADVERTISED_10baseT_Half |
6553 ADVERTISED_TP);
6554 }
6555 break;
6556
6557 case SPEED_100:
6558 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 6559 if (!(bp->port.supported &
f1410647
ET
6560 SUPPORTED_100baseT_Full)) {
6561 DP(NETIF_MSG_LINK,
6562 "100M full not supported\n");
a2fbb9ea 6563 return -EINVAL;
f1410647 6564 }
a2fbb9ea
ET
6565
6566 advertising = (ADVERTISED_100baseT_Full |
6567 ADVERTISED_TP);
6568 } else {
34f80b04 6569 if (!(bp->port.supported &
f1410647
ET
6570 SUPPORTED_100baseT_Half)) {
6571 DP(NETIF_MSG_LINK,
6572 "100M half not supported\n");
a2fbb9ea 6573 return -EINVAL;
f1410647 6574 }
a2fbb9ea
ET
6575
6576 advertising = (ADVERTISED_100baseT_Half |
6577 ADVERTISED_TP);
6578 }
6579 break;
6580
6581 case SPEED_1000:
f1410647
ET
6582 if (cmd->duplex != DUPLEX_FULL) {
6583 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 6584 return -EINVAL;
f1410647 6585 }
a2fbb9ea 6586
34f80b04 6587 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 6588 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 6589 return -EINVAL;
f1410647 6590 }
a2fbb9ea
ET
6591
6592 advertising = (ADVERTISED_1000baseT_Full |
6593 ADVERTISED_TP);
6594 break;
6595
6596 case SPEED_2500:
f1410647
ET
6597 if (cmd->duplex != DUPLEX_FULL) {
6598 DP(NETIF_MSG_LINK,
6599 "2.5G half not supported\n");
a2fbb9ea 6600 return -EINVAL;
f1410647 6601 }
a2fbb9ea 6602
34f80b04 6603 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
6604 DP(NETIF_MSG_LINK,
6605 "2.5G full not supported\n");
a2fbb9ea 6606 return -EINVAL;
f1410647 6607 }
a2fbb9ea 6608
f1410647 6609 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
6610 ADVERTISED_TP);
6611 break;
6612
6613 case SPEED_10000:
f1410647
ET
6614 if (cmd->duplex != DUPLEX_FULL) {
6615 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 6616 return -EINVAL;
f1410647 6617 }
a2fbb9ea 6618
34f80b04 6619 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 6620 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 6621 return -EINVAL;
f1410647 6622 }
a2fbb9ea
ET
6623
6624 advertising = (ADVERTISED_10000baseT_Full |
6625 ADVERTISED_FIBRE);
6626 break;
6627
6628 default:
f1410647 6629 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
6630 return -EINVAL;
6631 }
6632
c18487ee
YR
6633 bp->link_params.req_line_speed = cmd->speed;
6634 bp->link_params.req_duplex = cmd->duplex;
34f80b04 6635 bp->port.advertising = advertising;
a2fbb9ea
ET
6636 }
6637
c18487ee 6638 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 6639 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 6640 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 6641 bp->port.advertising);
a2fbb9ea 6642
34f80b04
EG
6643 if (netif_running(dev)) {
6644 bnx2x_stop_stats(bp);
6645 bnx2x_link_set(bp);
6646 }
a2fbb9ea
ET
6647
6648 return 0;
6649}
6650
c18487ee
YR
6651#define PHY_FW_VER_LEN 10
6652
a2fbb9ea
ET
6653static void bnx2x_get_drvinfo(struct net_device *dev,
6654 struct ethtool_drvinfo *info)
6655{
6656 struct bnx2x *bp = netdev_priv(dev);
c18487ee 6657 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
6658
6659 strcpy(info->driver, DRV_MODULE_NAME);
6660 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
6661
6662 phy_fw_ver[0] = '\0';
34f80b04
EG
6663 if (bp->port.pmf) {
6664 bnx2x_phy_hw_lock(bp);
6665 bnx2x_get_ext_phy_fw_version(&bp->link_params,
6666 (bp->state != BNX2X_STATE_CLOSED),
6667 phy_fw_ver, PHY_FW_VER_LEN);
6668 bnx2x_phy_hw_unlock(bp);
6669 }
c18487ee
YR
6670
6671 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 6672 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 6673 BCM_5710_FW_REVISION_VERSION,
34f80b04 6674 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 6675 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
6676 strcpy(info->bus_info, pci_name(bp->pdev));
6677 info->n_stats = BNX2X_NUM_STATS;
6678 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 6679 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
6680 info->regdump_len = 0;
6681}
6682
6683static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6684{
6685 struct bnx2x *bp = netdev_priv(dev);
6686
6687 if (bp->flags & NO_WOL_FLAG) {
6688 wol->supported = 0;
6689 wol->wolopts = 0;
6690 } else {
6691 wol->supported = WAKE_MAGIC;
6692 if (bp->wol)
6693 wol->wolopts = WAKE_MAGIC;
6694 else
6695 wol->wolopts = 0;
6696 }
6697 memset(&wol->sopass, 0, sizeof(wol->sopass));
6698}
6699
6700static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6701{
6702 struct bnx2x *bp = netdev_priv(dev);
6703
6704 if (wol->wolopts & ~WAKE_MAGIC)
6705 return -EINVAL;
6706
6707 if (wol->wolopts & WAKE_MAGIC) {
6708 if (bp->flags & NO_WOL_FLAG)
6709 return -EINVAL;
6710
6711 bp->wol = 1;
34f80b04 6712 } else
a2fbb9ea 6713 bp->wol = 0;
34f80b04 6714
a2fbb9ea
ET
6715 return 0;
6716}
6717
6718static u32 bnx2x_get_msglevel(struct net_device *dev)
6719{
6720 struct bnx2x *bp = netdev_priv(dev);
6721
6722 return bp->msglevel;
6723}
6724
6725static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
6726{
6727 struct bnx2x *bp = netdev_priv(dev);
6728
6729 if (capable(CAP_NET_ADMIN))
6730 bp->msglevel = level;
6731}
6732
6733static int bnx2x_nway_reset(struct net_device *dev)
6734{
6735 struct bnx2x *bp = netdev_priv(dev);
6736
34f80b04
EG
6737 if (!bp->port.pmf)
6738 return 0;
a2fbb9ea 6739
34f80b04
EG
6740 if (netif_running(dev)) {
6741 bnx2x_stop_stats(bp);
6742 bnx2x_link_set(bp);
6743 }
a2fbb9ea
ET
6744
6745 return 0;
6746}
6747
6748static int bnx2x_get_eeprom_len(struct net_device *dev)
6749{
6750 struct bnx2x *bp = netdev_priv(dev);
6751
34f80b04 6752 return bp->common.flash_size;
a2fbb9ea
ET
6753}
6754
6755static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
6756{
34f80b04 6757 int port = BP_PORT(bp);
a2fbb9ea
ET
6758 int count, i;
6759 u32 val = 0;
6760
6761 /* adjust timeout for emulation/FPGA */
6762 count = NVRAM_TIMEOUT_COUNT;
6763 if (CHIP_REV_IS_SLOW(bp))
6764 count *= 100;
6765
6766 /* request access to nvram interface */
6767 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
6768 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
6769
6770 for (i = 0; i < count*10; i++) {
6771 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
6772 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
6773 break;
6774
6775 udelay(5);
6776 }
6777
6778 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 6779 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
6780 return -EBUSY;
6781 }
6782
6783 return 0;
6784}
6785
6786static int bnx2x_release_nvram_lock(struct bnx2x *bp)
6787{
34f80b04 6788 int port = BP_PORT(bp);
a2fbb9ea
ET
6789 int count, i;
6790 u32 val = 0;
6791
6792 /* adjust timeout for emulation/FPGA */
6793 count = NVRAM_TIMEOUT_COUNT;
6794 if (CHIP_REV_IS_SLOW(bp))
6795 count *= 100;
6796
6797 /* relinquish nvram interface */
6798 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
6799 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
6800
6801 for (i = 0; i < count*10; i++) {
6802 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
6803 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
6804 break;
6805
6806 udelay(5);
6807 }
6808
6809 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 6810 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
6811 return -EBUSY;
6812 }
6813
6814 return 0;
6815}
6816
6817static void bnx2x_enable_nvram_access(struct bnx2x *bp)
6818{
6819 u32 val;
6820
6821 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
6822
6823 /* enable both bits, even on read */
6824 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
6825 (val | MCPR_NVM_ACCESS_ENABLE_EN |
6826 MCPR_NVM_ACCESS_ENABLE_WR_EN));
6827}
6828
6829static void bnx2x_disable_nvram_access(struct bnx2x *bp)
6830{
6831 u32 val;
6832
6833 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
6834
6835 /* disable both bits, even after read */
6836 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
6837 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
6838 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
6839}
6840
6841static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
6842 u32 cmd_flags)
6843{
f1410647 6844 int count, i, rc;
a2fbb9ea
ET
6845 u32 val;
6846
6847 /* build the command word */
6848 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
6849
6850 /* need to clear DONE bit separately */
6851 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
6852
6853 /* address of the NVRAM to read from */
6854 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
6855 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
6856
6857 /* issue a read command */
6858 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
6859
6860 /* adjust timeout for emulation/FPGA */
6861 count = NVRAM_TIMEOUT_COUNT;
6862 if (CHIP_REV_IS_SLOW(bp))
6863 count *= 100;
6864
6865 /* wait for completion */
6866 *ret_val = 0;
6867 rc = -EBUSY;
6868 for (i = 0; i < count; i++) {
6869 udelay(5);
6870 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
6871
6872 if (val & MCPR_NVM_COMMAND_DONE) {
6873 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
6874 /* we read nvram data in cpu order
6875 * but ethtool sees it as an array of bytes
6876 * converting to big-endian will do the work */
6877 val = cpu_to_be32(val);
6878 *ret_val = val;
6879 rc = 0;
6880 break;
6881 }
6882 }
6883
6884 return rc;
6885}
6886
6887static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
6888 int buf_size)
6889{
6890 int rc;
6891 u32 cmd_flags;
6892 u32 val;
6893
6894 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 6895 DP(BNX2X_MSG_NVM,
c14423fe 6896 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
6897 offset, buf_size);
6898 return -EINVAL;
6899 }
6900
34f80b04
EG
6901 if (offset + buf_size > bp->common.flash_size) {
6902 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 6903 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 6904 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
6905 return -EINVAL;
6906 }
6907
6908 /* request access to nvram interface */
6909 rc = bnx2x_acquire_nvram_lock(bp);
6910 if (rc)
6911 return rc;
6912
6913 /* enable access to nvram interface */
6914 bnx2x_enable_nvram_access(bp);
6915
6916 /* read the first word(s) */
6917 cmd_flags = MCPR_NVM_COMMAND_FIRST;
6918 while ((buf_size > sizeof(u32)) && (rc == 0)) {
6919 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
6920 memcpy(ret_buf, &val, 4);
6921
6922 /* advance to the next dword */
6923 offset += sizeof(u32);
6924 ret_buf += sizeof(u32);
6925 buf_size -= sizeof(u32);
6926 cmd_flags = 0;
6927 }
6928
6929 if (rc == 0) {
6930 cmd_flags |= MCPR_NVM_COMMAND_LAST;
6931 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
6932 memcpy(ret_buf, &val, 4);
6933 }
6934
6935 /* disable access to nvram interface */
6936 bnx2x_disable_nvram_access(bp);
6937 bnx2x_release_nvram_lock(bp);
6938
6939 return rc;
6940}
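
A minimal usage sketch of the read path (the helper name and the
offset are hypothetical, chosen only for illustration):

	/* Read 8 bytes from a dword-aligned NVRAM offset: the loop in
	 * bnx2x_nvram_read() issues exactly two dword commands, the
	 * first tagged MCPR_NVM_COMMAND_FIRST and the second tagged
	 * MCPR_NVM_COMMAND_LAST. */
	static int example_nvram_read_8(struct bnx2x *bp, u8 *buf)
	{
		return bnx2x_nvram_read(bp, 0x100, buf, 8);
	}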
6941
6942static int bnx2x_get_eeprom(struct net_device *dev,
6943 struct ethtool_eeprom *eeprom, u8 *eebuf)
6944{
6945 struct bnx2x *bp = netdev_priv(dev);
6946 int rc;
6947
34f80b04 6948 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
6949 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
6950 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
6951 eeprom->len, eeprom->len);
6952
6953 /* parameters already validated in ethtool_get_eeprom */
6954
6955 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6956
6957 return rc;
6958}
6959
6960static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
6961 u32 cmd_flags)
6962{
f1410647 6963 int count, i, rc;
a2fbb9ea
ET
6964
6965 /* build the command word */
6966 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
6967
6968 /* need to clear DONE bit separately */
6969 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
6970
6971 /* write the data */
6972 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
6973
6974 /* address of the NVRAM to write to */
6975 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
6976 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
6977
6978 /* issue the write command */
6979 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
6980
6981 /* adjust timeout for emulation/FPGA */
6982 count = NVRAM_TIMEOUT_COUNT;
6983 if (CHIP_REV_IS_SLOW(bp))
6984 count *= 100;
6985
6986 /* wait for completion */
6987 rc = -EBUSY;
6988 for (i = 0; i < count; i++) {
6989 udelay(5);
6990 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
6991 if (val & MCPR_NVM_COMMAND_DONE) {
6992 rc = 0;
6993 break;
6994 }
6995 }
6996
6997 return rc;
6998}
6999
f1410647 7000#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
7001
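	/* Example: for offset 0x13 the aligned container dword sits at
	 * 0x10 and BYTE_OFFSET(0x13) = 8 * 3 = 24, so the byte maps to
	 * bits 31:24 of the dword image that bnx2x_nvram_write1() below
	 * masks and rewrites. */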
7002static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7003 int buf_size)
7004{
7005 int rc;
7006 u32 cmd_flags;
7007 u32 align_offset;
7008 u32 val;
7009
34f80b04
EG
7010 if (offset + buf_size > bp->common.flash_size) {
7011 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7012 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7013 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7014 return -EINVAL;
7015 }
7016
7017 /* request access to nvram interface */
7018 rc = bnx2x_acquire_nvram_lock(bp);
7019 if (rc)
7020 return rc;
7021
7022 /* enable access to nvram interface */
7023 bnx2x_enable_nvram_access(bp);
7024
7025 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
7026 align_offset = (offset & ~0x03);
7027 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
7028
7029 if (rc == 0) {
7030 val &= ~(0xff << BYTE_OFFSET(offset));
7031 val |= (*data_buf << BYTE_OFFSET(offset));
7032
7033 /* nvram data is returned as an array of bytes
7034 * convert it back to cpu order */
7035 val = be32_to_cpu(val);
7036
a2fbb9ea
ET
7037 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
7038 cmd_flags);
7039 }
7040
7041 /* disable access to nvram interface */
7042 bnx2x_disable_nvram_access(bp);
7043 bnx2x_release_nvram_lock(bp);
7044
7045 return rc;
7046}
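
End to end, a single-byte write is a plain read-modify-write on the
byte's aligned container (made-up values for illustration): writing
0xAB at offset 0x13 reads the dword at align_offset 0x10 with
FIRST|LAST flags, clears bits 31:24, ORs in (0xAB << 24), converts
the image back to cpu order and writes the dword out.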
7047
7048static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7049 int buf_size)
7050{
7051 int rc;
7052 u32 cmd_flags;
7053 u32 val;
7054 u32 written_so_far;
7055
34f80b04 7056 if (buf_size == 1) /* ethtool */
a2fbb9ea 7057 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
7058
7059 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7060 DP(BNX2X_MSG_NVM,
c14423fe 7061 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7062 offset, buf_size);
7063 return -EINVAL;
7064 }
7065
34f80b04
EG
7066 if (offset + buf_size > bp->common.flash_size) {
7067 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7068 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7069 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7070 return -EINVAL;
7071 }
7072
7073 /* request access to nvram interface */
7074 rc = bnx2x_acquire_nvram_lock(bp);
7075 if (rc)
7076 return rc;
7077
7078 /* enable access to nvram interface */
7079 bnx2x_enable_nvram_access(bp);
7080
7081 written_so_far = 0;
7082 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7083 while ((written_so_far < buf_size) && (rc == 0)) {
7084 if (written_so_far == (buf_size - sizeof(u32)))
7085 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7086 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
7087 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7088 else if ((offset % NVRAM_PAGE_SIZE) == 0)
7089 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
7090
7091 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
7092
7093 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
7094
7095 /* advance to the next dword */
7096 offset += sizeof(u32);
7097 data_buf += sizeof(u32);
7098 written_so_far += sizeof(u32);
7099 cmd_flags = 0;
7100 }
7101
7102 /* disable access to nvram interface */
7103 bnx2x_disable_nvram_access(bp);
7104 bnx2x_release_nvram_lock(bp);
7105
7106 return rc;
7107}
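
A worked pass through the flag logic above (assuming NVRAM_PAGE_SIZE
is 256, as in bnx2x.h of this era): a 16-byte write at offset 0xF8
crosses a page boundary, so the dwords at 0xF8 and 0xFC go out as
FIRST..LAST and the dwords at 0x100 and 0x104 open a fresh
FIRST..LAST pair, so every flash page receives a complete command
sequence of its own.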
7108
7109static int bnx2x_set_eeprom(struct net_device *dev,
7110 struct ethtool_eeprom *eeprom, u8 *eebuf)
7111{
7112 struct bnx2x *bp = netdev_priv(dev);
7113 int rc;
7114
34f80b04 7115 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
7116 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7117 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7118 eeprom->len, eeprom->len);
7119
7120 /* parameters already validated in ethtool_set_eeprom */
7121
c18487ee 7122 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
7123	if (eeprom->magic == 0x00504859) {
7124 if (bp->port.pmf) {
7125
7126 bnx2x_phy_hw_lock(bp);
7127 rc = bnx2x_flash_download(bp, BP_PORT(bp),
7128 bp->link_params.ext_phy_config,
7129 (bp->state != BNX2X_STATE_CLOSED),
7130 eebuf, eeprom->len);
7131 rc |= bnx2x_link_reset(&bp->link_params,
7132 &bp->link_vars);
7133 rc |= bnx2x_phy_init(&bp->link_params,
7134 &bp->link_vars);
7135 bnx2x_phy_hw_unlock(bp);
7136
7137 } else /* Only the PMF can access the PHY */
7138 return -EINVAL;
7139	} else
c18487ee 7140 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
7141
7142 return rc;
7143}
7144
7145static int bnx2x_get_coalesce(struct net_device *dev,
7146 struct ethtool_coalesce *coal)
7147{
7148 struct bnx2x *bp = netdev_priv(dev);
7149
7150 memset(coal, 0, sizeof(struct ethtool_coalesce));
7151
7152 coal->rx_coalesce_usecs = bp->rx_ticks;
7153 coal->tx_coalesce_usecs = bp->tx_ticks;
7154 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7155
7156 return 0;
7157}
7158
7159static int bnx2x_set_coalesce(struct net_device *dev,
7160 struct ethtool_coalesce *coal)
7161{
7162 struct bnx2x *bp = netdev_priv(dev);
7163
7164 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7165	if (bp->rx_ticks > 0x3000)
7166		bp->rx_ticks = 0x3000;
7167
7168 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7169 if (bp->tx_ticks > 0x3000)
7170 bp->tx_ticks = 0x3000;
7171
7172 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7173 if (bp->stats_ticks > 0xffff00)
7174 bp->stats_ticks = 0xffff00;
7175 bp->stats_ticks &= 0xffff00;
7176
34f80b04 7177 if (netif_running(dev))
a2fbb9ea
ET
7178 bnx2x_update_coalesce(bp);
7179
7180 return 0;
7181}
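
From userspace these clamps are exercised through the standard
ethtool coalescing interface, e.g. (illustrative device name):

	ethtool -C eth0 rx-usecs 100 tx-usecs 200 stats-block-usecs 1000000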
7182
7183static void bnx2x_get_ringparam(struct net_device *dev,
7184 struct ethtool_ringparam *ering)
7185{
7186 struct bnx2x *bp = netdev_priv(dev);
7187
7188 ering->rx_max_pending = MAX_RX_AVAIL;
7189 ering->rx_mini_max_pending = 0;
7190 ering->rx_jumbo_max_pending = 0;
7191
7192 ering->rx_pending = bp->rx_ring_size;
7193 ering->rx_mini_pending = 0;
7194 ering->rx_jumbo_pending = 0;
7195
7196 ering->tx_max_pending = MAX_TX_AVAIL;
7197 ering->tx_pending = bp->tx_ring_size;
7198}
7199
7200static int bnx2x_set_ringparam(struct net_device *dev,
7201 struct ethtool_ringparam *ering)
7202{
7203 struct bnx2x *bp = netdev_priv(dev);
34f80b04 7204 int rc = 0;
a2fbb9ea
ET
7205
7206 if ((ering->rx_pending > MAX_RX_AVAIL) ||
7207 (ering->tx_pending > MAX_TX_AVAIL) ||
7208 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
7209 return -EINVAL;
7210
7211 bp->rx_ring_size = ering->rx_pending;
7212 bp->tx_ring_size = ering->tx_pending;
7213
34f80b04
EG
7214 if (netif_running(dev)) {
7215 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7216 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
7217 }
7218
34f80b04 7219 return rc;
a2fbb9ea
ET
7220}
7221
7222static void bnx2x_get_pauseparam(struct net_device *dev,
7223 struct ethtool_pauseparam *epause)
7224{
7225 struct bnx2x *bp = netdev_priv(dev);
7226
c18487ee
YR
7227 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7228 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
7229
7230 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
7231 FLOW_CTRL_RX);
7232 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
7233 FLOW_CTRL_TX);
a2fbb9ea
ET
7234
7235 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7236 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7237 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7238}
7239
7240static int bnx2x_set_pauseparam(struct net_device *dev,
7241 struct ethtool_pauseparam *epause)
7242{
7243 struct bnx2x *bp = netdev_priv(dev);
7244
34f80b04
EG
7245 if (IS_E1HMF(bp))
7246 return 0;
7247
a2fbb9ea
ET
7248 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7249 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7250 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7251
c18487ee 7252 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 7253
f1410647 7254 if (epause->rx_pause)
c18487ee
YR
7255 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
7256
f1410647 7257 if (epause->tx_pause)
c18487ee
YR
7258 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
7259
7260 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
7261 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7262
c18487ee 7263 if (epause->autoneg) {
34f80b04 7264 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
7265 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7266 return -EINVAL;
7267 }
a2fbb9ea 7268
c18487ee
YR
7269 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7270 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
7271 }
a2fbb9ea 7272
c18487ee
YR
7273 DP(NETIF_MSG_LINK,
7274 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
7275
7276 if (netif_running(dev)) {
7277 bnx2x_stop_stats(bp);
7278 bnx2x_link_set(bp);
7279 }
a2fbb9ea
ET
7280
7281 return 0;
7282}
7283
7284static u32 bnx2x_get_rx_csum(struct net_device *dev)
7285{
7286 struct bnx2x *bp = netdev_priv(dev);
7287
7288 return bp->rx_csum;
7289}
7290
7291static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
7292{
7293 struct bnx2x *bp = netdev_priv(dev);
7294
7295 bp->rx_csum = data;
7296 return 0;
7297}
7298
7299static int bnx2x_set_tso(struct net_device *dev, u32 data)
7300{
7301 if (data)
7302 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7303 else
7304 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
7305 return 0;
7306}
7307
7308static struct {
7309 char string[ETH_GSTRING_LEN];
7310} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
7311 { "MC Errors (online)" }
7312};
7313
7314static int bnx2x_self_test_count(struct net_device *dev)
7315{
7316 return BNX2X_NUM_TESTS;
7317}
7318
7319static void bnx2x_self_test(struct net_device *dev,
7320 struct ethtool_test *etest, u64 *buf)
7321{
7322 struct bnx2x *bp = netdev_priv(dev);
7323 int stats_state;
7324
7325 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
7326
7327 if (bp->state != BNX2X_STATE_OPEN) {
7328 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7329 return;
7330 }
7331
7332 stats_state = bp->stats_state;
7333 bnx2x_stop_stats(bp);
7334
7335 if (bnx2x_mc_assert(bp) != 0) {
7336 buf[0] = 1;
7337 etest->flags |= ETH_TEST_FL_FAILED;
7338 }
7339
7340#ifdef BNX2X_EXTRA_DEBUG
7341 bnx2x_panic_dump(bp);
7342#endif
7343 bp->stats_state = stats_state;
7344}
7345
7346static struct {
7347 char string[ETH_GSTRING_LEN];
7348} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
0e39e645
ET
7349 { "rx_bytes"},
7350 { "rx_error_bytes"},
7351 { "tx_bytes"},
7352 { "tx_error_bytes"},
7353 { "rx_ucast_packets"},
7354 { "rx_mcast_packets"},
7355 { "rx_bcast_packets"},
7356 { "tx_ucast_packets"},
7357 { "tx_mcast_packets"},
7358 { "tx_bcast_packets"},
7359 { "tx_mac_errors"}, /* 10 */
7360 { "tx_carrier_errors"},
7361 { "rx_crc_errors"},
7362 { "rx_align_errors"},
7363 { "tx_single_collisions"},
7364 { "tx_multi_collisions"},
7365 { "tx_deferred"},
7366 { "tx_excess_collisions"},
7367 { "tx_late_collisions"},
7368 { "tx_total_collisions"},
7369 { "rx_fragments"}, /* 20 */
7370 { "rx_jabbers"},
7371 { "rx_undersize_packets"},
7372 { "rx_oversize_packets"},
7373 { "rx_xon_frames"},
7374 { "rx_xoff_frames"},
7375 { "tx_xon_frames"},
7376 { "tx_xoff_frames"},
7377 { "rx_mac_ctrl_frames"},
7378 { "rx_filtered_packets"},
7379 { "rx_discards"}, /* 30 */
7380 { "brb_discard"},
7381 { "brb_truncate"},
7382 { "xxoverflow"}
a2fbb9ea
ET
7383};
7384
7385#define STATS_OFFSET32(offset_name) \
7386 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
7387
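	/* Example: a counter at byte offset 0x10 in struct
	 * bnx2x_eth_stats (an illustrative offset, not the real layout)
	 * gives STATS_OFFSET32() == 4, i.e. the index of that field
	 * when the stats block is walked as an array of u32, which is
	 * exactly how bnx2x_get_ethtool_stats() below uses it. */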
7388static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
0e39e645
ET
7389 STATS_OFFSET32(total_bytes_received_hi),
7390 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7391 STATS_OFFSET32(total_bytes_transmitted_hi),
7392 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7393 STATS_OFFSET32(total_unicast_packets_received_hi),
7394 STATS_OFFSET32(total_multicast_packets_received_hi),
7395 STATS_OFFSET32(total_broadcast_packets_received_hi),
7396 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
7397 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
7398 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
7399 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
7400 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7401 STATS_OFFSET32(crc_receive_errors),
7402 STATS_OFFSET32(alignment_errors),
7403 STATS_OFFSET32(single_collision_transmit_frames),
7404 STATS_OFFSET32(multiple_collision_transmit_frames),
7405 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7406 STATS_OFFSET32(excessive_collision_frames),
7407 STATS_OFFSET32(late_collision_frames),
7408 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
7409 STATS_OFFSET32(runt_packets_received), /* 20 */
7410 STATS_OFFSET32(jabber_packets_received),
7411 STATS_OFFSET32(error_runt_packets_received),
7412 STATS_OFFSET32(error_jabber_packets_received),
7413 STATS_OFFSET32(pause_xon_frames_received),
7414 STATS_OFFSET32(pause_xoff_frames_received),
7415 STATS_OFFSET32(pause_xon_frames_transmitted),
7416 STATS_OFFSET32(pause_xoff_frames_transmitted),
7417 STATS_OFFSET32(control_frames_received),
7418 STATS_OFFSET32(mac_filter_discard),
7419 STATS_OFFSET32(no_buff_discard), /* 30 */
7420 STATS_OFFSET32(brb_discard),
7421 STATS_OFFSET32(brb_truncate_discard),
7422 STATS_OFFSET32(xxoverflow_discard)
a2fbb9ea
ET
7423};
7424
7425static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
7426 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
7427 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
7428 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
0e39e645 7429 4, 4, 4, 4
a2fbb9ea
ET
7430};
7431
7432static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7433{
7434 switch (stringset) {
7435 case ETH_SS_STATS:
7436 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
7437 break;
7438
7439 case ETH_SS_TEST:
7440 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
7441 break;
7442 }
7443}
7444
7445static int bnx2x_get_stats_count(struct net_device *dev)
7446{
7447 return BNX2X_NUM_STATS;
7448}
7449
7450static void bnx2x_get_ethtool_stats(struct net_device *dev,
7451 struct ethtool_stats *stats, u64 *buf)
7452{
7453 struct bnx2x *bp = netdev_priv(dev);
7454 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
7455 int i;
7456
7457 for (i = 0; i < BNX2X_NUM_STATS; i++) {
7458 if (bnx2x_stats_len_arr[i] == 0) {
7459 /* skip this counter */
7460 buf[i] = 0;
7461 continue;
7462 }
7463 if (!hw_stats) {
7464 buf[i] = 0;
7465 continue;
7466 }
7467 if (bnx2x_stats_len_arr[i] == 4) {
7468 /* 4-byte counter */
7469 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
7470 continue;
7471 }
7472 /* 8-byte counter */
7473 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
7474 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
7475 }
7476}
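
The 8-byte case relies on each 64-bit counter being laid out as
adjacent _hi/_lo u32 words; HILO_U64(hi, lo) is effectively
(((u64)hi << 32) + lo), so hi = 0x1, lo = 0x2 reports 0x100000002.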
7477
7478static int bnx2x_phys_id(struct net_device *dev, u32 data)
7479{
7480 struct bnx2x *bp = netdev_priv(dev);
34f80b04 7481 int port = BP_PORT(bp);
a2fbb9ea
ET
7482 int i;
7483
34f80b04
EG
7484 if (!netif_running(dev))
7485 return 0;
7486
7487 if (!bp->port.pmf)
7488 return 0;
7489
a2fbb9ea
ET
7490 if (data == 0)
7491 data = 2;
7492
7493 for (i = 0; i < (data * 2); i++) {
c18487ee 7494 if ((i % 2) == 0)
34f80b04 7495 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
7496 bp->link_params.hw_led_mode,
7497 bp->link_params.chip_id);
7498 else
34f80b04 7499 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
7500 bp->link_params.hw_led_mode,
7501 bp->link_params.chip_id);
7502
a2fbb9ea
ET
7503 msleep_interruptible(500);
7504 if (signal_pending(current))
7505 break;
7506 }
7507
c18487ee 7508 if (bp->link_vars.link_up)
34f80b04 7509 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
7510 bp->link_vars.line_speed,
7511 bp->link_params.hw_led_mode,
7512 bp->link_params.chip_id);
a2fbb9ea
ET
7513
7514 return 0;
7515}
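
This is the standard ethtool port-identify hook: "ethtool -p eth0 5"
blinks the LED for 5 seconds (ten 500 ms half-periods), while
omitting the time gives data == 0 and falls back to the 2-second
default above.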
7516
7517static struct ethtool_ops bnx2x_ethtool_ops = {
7518 .get_settings = bnx2x_get_settings,
7519 .set_settings = bnx2x_set_settings,
7520 .get_drvinfo = bnx2x_get_drvinfo,
7521 .get_wol = bnx2x_get_wol,
7522 .set_wol = bnx2x_set_wol,
7523 .get_msglevel = bnx2x_get_msglevel,
7524 .set_msglevel = bnx2x_set_msglevel,
7525 .nway_reset = bnx2x_nway_reset,
7526 .get_link = ethtool_op_get_link,
7527 .get_eeprom_len = bnx2x_get_eeprom_len,
7528 .get_eeprom = bnx2x_get_eeprom,
7529 .set_eeprom = bnx2x_set_eeprom,
7530 .get_coalesce = bnx2x_get_coalesce,
7531 .set_coalesce = bnx2x_set_coalesce,
7532 .get_ringparam = bnx2x_get_ringparam,
7533 .set_ringparam = bnx2x_set_ringparam,
7534 .get_pauseparam = bnx2x_get_pauseparam,
7535 .set_pauseparam = bnx2x_set_pauseparam,
7536 .get_rx_csum = bnx2x_get_rx_csum,
7537 .set_rx_csum = bnx2x_set_rx_csum,
7538 .get_tx_csum = ethtool_op_get_tx_csum,
7539 .set_tx_csum = ethtool_op_set_tx_csum,
7540 .get_sg = ethtool_op_get_sg,
7541 .set_sg = ethtool_op_set_sg,
7542 .get_tso = ethtool_op_get_tso,
7543 .set_tso = bnx2x_set_tso,
7544 .self_test_count = bnx2x_self_test_count,
7545 .self_test = bnx2x_self_test,
7546 .get_strings = bnx2x_get_strings,
7547 .phys_id = bnx2x_phys_id,
7548 .get_stats_count = bnx2x_get_stats_count,
7549 .get_ethtool_stats = bnx2x_get_ethtool_stats
7550};
7551
7552/* end of ethtool_ops */
7553
7554/****************************************************************************
7555* General service functions
7556****************************************************************************/
7557
7558static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
7559{
7560 u16 pmcsr;
7561
7562 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
7563
7564 switch (state) {
7565 case PCI_D0:
34f80b04 7566 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
7567 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
7568 PCI_PM_CTRL_PME_STATUS));
7569
7570 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
7571 /* delay required during transition out of D3hot */
7572 msleep(20);
34f80b04 7573 break;
a2fbb9ea 7574
34f80b04
EG
7575 case PCI_D3hot:
7576 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
7577 pmcsr |= 3;
a2fbb9ea 7578
34f80b04
EG
7579 if (bp->wol)
7580 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 7581
34f80b04
EG
7582 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
7583 pmcsr);
a2fbb9ea 7584
34f80b04
EG
7585 /* No more memory access after this point until
7586 * device is brought back to D0.
7587 */
7588 break;
7589
7590 default:
7591 return -EINVAL;
7592 }
7593 return 0;
a2fbb9ea
ET
7594}
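
	/* In the D3hot branch, pmcsr |= 3 selects D3hot in the
	 * PCI_PM_CTRL_STATE_MASK field (the PCI PM spec encodes the
	 * states 0 = D0 .. 3 = D3hot), and PCI_PM_CTRL_PME_ENABLE arms
	 * PME# so a wake-on-LAN packet can bring the device back up. */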
7595
34f80b04
EG
7596/*
7597 * net_device service functions
7598 */
7599
a2fbb9ea
ET
7600static int bnx2x_poll(struct napi_struct *napi, int budget)
7601{
7602 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
7603 napi);
7604 struct bnx2x *bp = fp->bp;
7605 int work_done = 0;
7606
7607#ifdef BNX2X_STOP_ON_ERROR
7608 if (unlikely(bp->panic))
34f80b04 7609 goto poll_panic;
a2fbb9ea
ET
7610#endif
7611
7612 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
7613 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
7614 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
7615
7616 bnx2x_update_fpsb_idx(fp);
7617
34f80b04
EG
7618 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
7619 (fp->tx_pkt_prod != fp->tx_pkt_cons))
a2fbb9ea
ET
7620 bnx2x_tx_int(fp, budget);
7621
a2fbb9ea
ET
7622 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
7623 work_done = bnx2x_rx_int(fp, budget);
7624
a2fbb9ea
ET
7625 rmb(); /* bnx2x_has_work() reads the status block */
7626
7627 /* must not complete if we consumed full budget */
7628 if ((work_done < budget) && !bnx2x_has_work(fp)) {
7629
7630#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7631poll_panic:
a2fbb9ea
ET
7632#endif
7633 netif_rx_complete(bp->dev, napi);
7634
34f80b04 7635 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 7636 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 7637 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
7638 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
7639 }
a2fbb9ea
ET
7640 return work_done;
7641}
7642
7643/* Called with netif_tx_lock.
7644 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
7645 * netif_wake_queue().
7646 */
7647static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
7648{
7649 struct bnx2x *bp = netdev_priv(dev);
7650 struct bnx2x_fastpath *fp;
7651 struct sw_tx_bd *tx_buf;
7652 struct eth_tx_bd *tx_bd;
7653 struct eth_tx_parse_bd *pbd = NULL;
7654 u16 pkt_prod, bd_prod;
7655 int nbd, fp_index = 0;
7656 dma_addr_t mapping;
7657
7658#ifdef BNX2X_STOP_ON_ERROR
7659 if (unlikely(bp->panic))
7660 return NETDEV_TX_BUSY;
7661#endif
7662
7663 fp_index = smp_processor_id() % (bp->num_queues);
7664
7665 fp = &bp->fp[fp_index];
7666	if (unlikely(bnx2x_tx_avail(fp) <
7667		     (skb_shinfo(skb)->nr_frags + 3))) {
7668		bp->slowpath->eth_stats.driver_xoff++;
7669		netif_stop_queue(dev);
7670 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
7671 return NETDEV_TX_BUSY;
7672 }
7673
7674 /*
7675 This is a bit ugly. First we use one BD which we mark as start,
7676 then for TSO or xsum we have a parsing info BD,
7677 and only then we have the rest of the TSO bds.
7678 (don't forget to mark the last one as last,
7679 and to unmap only AFTER you write to the BD ...)
7680 I would like to thank DovH for this mess.
7681 */
7682
7683 pkt_prod = fp->tx_pkt_prod++;
7684 bd_prod = fp->tx_bd_prod;
7685 bd_prod = TX_BD(bd_prod);
7686
7687 /* get a tx_buff and first bd */
7688 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
7689 tx_bd = &fp->tx_desc_ring[bd_prod];
7690
7691 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
7692 tx_bd->general_data = (UNICAST_ADDRESS <<
7693 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
7694 tx_bd->general_data |= 1; /* header nbd */
7695
c14423fe 7696 /* remember the first bd of the packet */
a2fbb9ea
ET
7697 tx_buf->first_bd = bd_prod;
7698
7699 DP(NETIF_MSG_TX_QUEUED,
7700 "sending pkt %u @%p next_idx %u bd %u @%p\n",
7701 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
7702
7703 if (skb->ip_summed == CHECKSUM_PARTIAL) {
7704 struct iphdr *iph = ip_hdr(skb);
7705 u8 len;
7706
7707 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
7708
7709 /* turn on parsing and get a bd */
7710 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
7711 pbd = (void *)&fp->tx_desc_ring[bd_prod];
7712 len = ((u8 *)iph - (u8 *)skb->data) / 2;
7713
7714 /* for now NS flag is not used in Linux */
7715 pbd->global_data = (len |
96fc1784 7716			    ((skb->protocol == htons(ETH_P_8021Q)) <<
a2fbb9ea
ET
7717 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
7718 pbd->ip_hlen = ip_hdrlen(skb) / 2;
7719 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
7720 if (iph->protocol == IPPROTO_TCP) {
7721 struct tcphdr *th = tcp_hdr(skb);
7722
7723 tx_bd->bd_flags.as_bitfield |=
7724 ETH_TX_BD_FLAGS_TCP_CSUM;
96fc1784 7725 pbd->tcp_flags = pbd_tcp_flags(skb);
a2fbb9ea
ET
7726 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
7727 pbd->tcp_pseudo_csum = swab16(th->check);
7728
7729 } else if (iph->protocol == IPPROTO_UDP) {
7730 struct udphdr *uh = udp_hdr(skb);
7731
7732 tx_bd->bd_flags.as_bitfield |=
7733 ETH_TX_BD_FLAGS_TCP_CSUM;
7734 pbd->total_hlen += cpu_to_le16(4);
7735 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
7736 pbd->cs_offset = 5; /* 10 >> 1 */
7737 pbd->tcp_pseudo_csum = 0;
7738			/* HW bug: the 10 bytes preceding the UDP header
7739			 * must be subtracted from the csum
7740			 */
7741 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
7742 csum_partial(((u8 *)(uh)-10), 10, 0)));
7743 }
7744 }
7745
7746 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
7747 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
7748 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
7749 } else {
7750 tx_bd->vlan = cpu_to_le16(pkt_prod);
7751 }
7752
7753 mapping = pci_map_single(bp->pdev, skb->data,
7754 skb->len, PCI_DMA_TODEVICE);
7755
7756 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
7757 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
7758 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
7759 tx_bd->nbd = cpu_to_le16(nbd);
7760 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
7761
7762 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
7763 " nbytes %d flags %x vlan %u\n",
7764 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
7765 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
7766
7767 if (skb_shinfo(skb)->gso_size &&
7768 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
96fc1784 7769 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
a2fbb9ea
ET
7770
7771 DP(NETIF_MSG_TX_QUEUED,
7772 "TSO packet len %d hlen %d total len %d tso size %d\n",
7773 skb->len, hlen, skb_headlen(skb),
7774 skb_shinfo(skb)->gso_size);
7775
7776 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
7777
7778 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
7779		/* we split the first bd into header and data bds
7780		 * to ease the pain of our fellow microcode engineers;
7781		 * we use one mapping for both bds
7782 * So far this has only been observed to happen
7783 * in Other Operating Systems(TM)
7784 */
7785
7786 /* first fix first bd */
7787 nbd++;
7788 tx_bd->nbd = cpu_to_le16(nbd);
7789 tx_bd->nbytes = cpu_to_le16(hlen);
7790
7791 /* we only print this as an error
7792 * because we don't think this will ever happen.
7793 */
7794 BNX2X_ERR("TSO split header size is %d (%x:%x)"
7795 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
7796 tx_bd->addr_lo, tx_bd->nbd);
7797
7798 /* now get a new data bd
7799 * (after the pbd) and fill it */
7800 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
7801 tx_bd = &fp->tx_desc_ring[bd_prod];
7802
7803 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
7804 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
7805 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
7806 tx_bd->vlan = cpu_to_le16(pkt_prod);
7807 /* this marks the bd
7808 * as one that has no individual mapping
c14423fe 7809 * the FW ignores this flag in a bd not marked start
a2fbb9ea
ET
7810 */
7811 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
7812 DP(NETIF_MSG_TX_QUEUED,
7813 "TSO split data size is %d (%x:%x)\n",
7814 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
7815 }
7816
7817 if (!pbd) {
7818 /* supposed to be unreached
7819 * (and therefore not handled properly...)
7820 */
7821 BNX2X_ERR("LSO with no PBD\n");
7822 BUG();
7823 }
7824
7825 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
7826 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
7827 pbd->ip_id = swab16(ip_hdr(skb)->id);
7828 pbd->tcp_pseudo_csum =
7829 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
7830 ip_hdr(skb)->daddr,
7831 0, IPPROTO_TCP, 0));
7832 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
7833 }
7834
7835 {
7836 int i;
7837
7838 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
7839 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7840
7841 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
7842 tx_bd = &fp->tx_desc_ring[bd_prod];
7843
7844 mapping = pci_map_page(bp->pdev, frag->page,
7845 frag->page_offset,
7846 frag->size, PCI_DMA_TODEVICE);
7847
7848 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
7849 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
7850 tx_bd->nbytes = cpu_to_le16(frag->size);
7851 tx_bd->vlan = cpu_to_le16(pkt_prod);
7852 tx_bd->bd_flags.as_bitfield = 0;
7853 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
7854 " addr (%x:%x) nbytes %d flags %x\n",
7855 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
7856 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
7857 } /* for */
7858 }
7859
7860 /* now at last mark the bd as the last bd */
7861 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
7862
7863 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
7864 tx_bd, tx_bd->bd_flags.as_bitfield);
7865
7866 tx_buf->skb = skb;
7867
7868 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
7869
7870 /* now send a tx doorbell, counting the next bd
7871 * if the packet contains or ends with it
7872 */
7873 if (TX_BD_POFF(bd_prod) < nbd)
7874 nbd++;
7875
7876 if (pbd)
7877 DP(NETIF_MSG_TX_QUEUED,
7878 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
7879 " tcp_flags %x xsum %x seq %u hlen %u\n",
7880 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
7881 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
7882 pbd->tcp_send_seq, pbd->total_hlen);
7883
7884 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
7885
96fc1784
ET
7886 fp->hw_tx_prods->bds_prod =
7887 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 7888 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
7889 fp->hw_tx_prods->packets_prod =
7890 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
a2fbb9ea
ET
7891 DOORBELL(bp, fp_index, 0);
7892
7893 mmiowb();
7894
7895 fp->tx_bd_prod = bd_prod;
7896 dev->trans_start = jiffies;
7897
7898 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
7899 netif_stop_queue(dev);
7900 bp->slowpath->eth_stats.driver_xoff++;
7901 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
7902 netif_wake_queue(dev);
7903 }
7904 fp->tx_pkt++;
7905
7906 return NETDEV_TX_OK;
7907}
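
A worked BD count for the path above (illustrative packet): a TSO skb
with two frags and checksum offload consumes one start BD, one
parsing BD and two frag BDs, so nbd = nr_frags + 2 = 4; if the
header/data split fires, the extra data BD bumps nbd to 5 before the
doorbell is rung.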
7908
a2fbb9ea
ET
7909/* Called with rtnl_lock */
7910static int bnx2x_open(struct net_device *dev)
7911{
7912 struct bnx2x *bp = netdev_priv(dev);
7913
7914 bnx2x_set_power_state(bp, PCI_D0);
7915
7916 return bnx2x_nic_load(bp, 1);
7917}
7918
7919/* Called with rtnl_lock */
7920static int bnx2x_close(struct net_device *dev)
7921{
a2fbb9ea
ET
7922 struct bnx2x *bp = netdev_priv(dev);
7923
7924 /* Unload the driver, release IRQs */
228241eb
ET
7925 bnx2x_nic_unload(bp, 1);
7926
7927 if (!CHIP_REV_IS_SLOW(bp))
7928 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
7929
7930 return 0;
7931}
7932
34f80b04
EG
7933/* called with netif_tx_lock from set_multicast */
7934static void bnx2x_set_rx_mode(struct net_device *dev)
7935{
7936 struct bnx2x *bp = netdev_priv(dev);
7937 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
7938 int port = BP_PORT(bp);
7939
7940 if (bp->state != BNX2X_STATE_OPEN) {
7941 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
7942 return;
7943 }
7944
7945 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
7946
7947 if (dev->flags & IFF_PROMISC)
7948 rx_mode = BNX2X_RX_MODE_PROMISC;
7949
7950 else if ((dev->flags & IFF_ALLMULTI) ||
7951 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
7952 rx_mode = BNX2X_RX_MODE_ALLMULTI;
7953
7954 else { /* some multicasts */
7955 if (CHIP_IS_E1(bp)) {
7956 int i, old, offset;
7957 struct dev_mc_list *mclist;
7958 struct mac_configuration_cmd *config =
7959 bnx2x_sp(bp, mcast_config);
7960
7961 for (i = 0, mclist = dev->mc_list;
7962 mclist && (i < dev->mc_count);
7963 i++, mclist = mclist->next) {
7964
7965 config->config_table[i].
7966 cam_entry.msb_mac_addr =
7967 swab16(*(u16 *)&mclist->dmi_addr[0]);
7968 config->config_table[i].
7969 cam_entry.middle_mac_addr =
7970 swab16(*(u16 *)&mclist->dmi_addr[2]);
7971 config->config_table[i].
7972 cam_entry.lsb_mac_addr =
7973 swab16(*(u16 *)&mclist->dmi_addr[4]);
7974 config->config_table[i].cam_entry.flags =
7975 cpu_to_le16(port);
7976 config->config_table[i].
7977 target_table_entry.flags = 0;
7978 config->config_table[i].
7979 target_table_entry.client_id = 0;
7980 config->config_table[i].
7981 target_table_entry.vlan_id = 0;
7982
7983 DP(NETIF_MSG_IFUP,
7984 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
7985 config->config_table[i].
7986 cam_entry.msb_mac_addr,
7987 config->config_table[i].
7988 cam_entry.middle_mac_addr,
7989 config->config_table[i].
7990 cam_entry.lsb_mac_addr);
7991 }
7992 old = config->hdr.length_6b;
7993 if (old > i) {
7994 for (; i < old; i++) {
7995 if (CAM_IS_INVALID(config->
7996 config_table[i])) {
7997 i--; /* already invalidated */
7998 break;
7999 }
8000 /* invalidate */
8001 CAM_INVALIDATE(config->
8002 config_table[i]);
8003 }
8004 }
8005
8006 if (CHIP_REV_IS_SLOW(bp))
8007 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8008 else
8009 offset = BNX2X_MAX_MULTICAST*(1 + port);
8010
8011 config->hdr.length_6b = i;
8012 config->hdr.offset = offset;
8013 config->hdr.client_id = BP_CL_ID(bp);
8014 config->hdr.reserved1 = 0;
8015
8016 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8017 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8018 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
8019 0);
8020 } else { /* E1H */
8021 /* Accept one or more multicasts */
8022 struct dev_mc_list *mclist;
8023 u32 mc_filter[MC_HASH_SIZE];
8024 u32 crc, bit, regidx;
8025 int i;
8026
8027 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8028
8029 for (i = 0, mclist = dev->mc_list;
8030 mclist && (i < dev->mc_count);
8031 i++, mclist = mclist->next) {
8032
8033 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
8034 "%02x:%02x:%02x:%02x:%02x:%02x\n",
8035 mclist->dmi_addr[0], mclist->dmi_addr[1],
8036 mclist->dmi_addr[2], mclist->dmi_addr[3],
8037 mclist->dmi_addr[4], mclist->dmi_addr[5]);
8038
8039 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
8040 bit = (crc >> 24) & 0xff;
8041 regidx = bit >> 5;
8042 bit &= 0x1f;
8043 mc_filter[regidx] |= (1 << bit);
8044 }
8045
8046 for (i = 0; i < MC_HASH_SIZE; i++)
8047 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8048 mc_filter[i]);
8049 }
8050 }
8051
8052 bp->rx_mode = rx_mode;
8053 bnx2x_set_storm_rx_mode(bp);
8054}
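
	/* Worked example of the E1H hash (symbolic, no precomputed CRC
	 * claimed): for a multicast address like 01:00:5e:00:00:01,
	 * crc32c_le() runs over the 6 MAC bytes; bit = (crc >> 24) & 0xff
	 * selects one of 256 filter bits, regidx = bit >> 5 picks one of
	 * the eight 32-bit MC_HASH registers, and (1 << (bit & 0x1f))
	 * sets the bit inside it. */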
8055
8056/* called with rtnl_lock */
a2fbb9ea
ET
8057static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
8058{
8059 struct sockaddr *addr = p;
8060 struct bnx2x *bp = netdev_priv(dev);
8061
34f80b04 8062 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
8063 return -EINVAL;
8064
8065 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
8066 if (netif_running(dev)) {
8067 if (CHIP_IS_E1(bp))
8068 bnx2x_set_mac_addr_e1(bp);
8069 else
8070 bnx2x_set_mac_addr_e1h(bp);
8071 }
a2fbb9ea
ET
8072
8073 return 0;
8074}
8075
c18487ee 8076/* called with rtnl_lock */
a2fbb9ea
ET
8077static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8078{
8079 struct mii_ioctl_data *data = if_mii(ifr);
8080 struct bnx2x *bp = netdev_priv(dev);
8081 int err;
8082
8083 switch (cmd) {
8084 case SIOCGMIIPHY:
34f80b04 8085 data->phy_id = bp->port.phy_addr;
a2fbb9ea 8086
c14423fe 8087 /* fallthrough */
c18487ee 8088
a2fbb9ea 8089 case SIOCGMIIREG: {
c18487ee 8090 u16 mii_regval;
a2fbb9ea 8091
c18487ee
YR
8092 if (!netif_running(dev))
8093 return -EAGAIN;
a2fbb9ea 8094
34f80b04
EG
8095 mutex_lock(&bp->port.phy_mutex);
8096 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
8097 DEFAULT_PHY_DEV_ADDR,
8098 (data->reg_num & 0x1f), &mii_regval);
8099 data->val_out = mii_regval;
34f80b04 8100 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
8101 return err;
8102 }
8103
8104 case SIOCSMIIREG:
8105 if (!capable(CAP_NET_ADMIN))
8106 return -EPERM;
8107
c18487ee
YR
8108 if (!netif_running(dev))
8109 return -EAGAIN;
8110
34f80b04
EG
8111 mutex_lock(&bp->port.phy_mutex);
8112 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
8113 DEFAULT_PHY_DEV_ADDR,
8114 (data->reg_num & 0x1f), data->val_in);
34f80b04 8115 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
8116 return err;
8117
8118 default:
8119 /* do nothing */
8120 break;
8121 }
8122
8123 return -EOPNOTSUPP;
8124}
8125
34f80b04 8126/* called with rtnl_lock */
a2fbb9ea
ET
8127static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8128{
8129 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8130 int rc = 0;
a2fbb9ea
ET
8131
8132 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
8133 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
8134 return -EINVAL;
8135
8136 /* This does not race with packet allocation
c14423fe 8137 * because the actual alloc size is
a2fbb9ea
ET
8138 * only updated as part of load
8139 */
8140 dev->mtu = new_mtu;
8141
8142 if (netif_running(dev)) {
34f80b04
EG
8143 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8144 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 8145 }
34f80b04
EG
8146
8147 return rc;
a2fbb9ea
ET
8148}
8149
8150static void bnx2x_tx_timeout(struct net_device *dev)
8151{
8152 struct bnx2x *bp = netdev_priv(dev);
8153
8154#ifdef BNX2X_STOP_ON_ERROR
8155 if (!bp->panic)
8156 bnx2x_panic();
8157#endif
8158 /* This allows the netif to be shutdown gracefully before resetting */
8159 schedule_work(&bp->reset_task);
8160}
8161
8162#ifdef BCM_VLAN
34f80b04 8163/* called with rtnl_lock */
a2fbb9ea
ET
8164static void bnx2x_vlan_rx_register(struct net_device *dev,
8165 struct vlan_group *vlgrp)
8166{
8167 struct bnx2x *bp = netdev_priv(dev);
8168
8169 bp->vlgrp = vlgrp;
8170 if (netif_running(dev))
49d66772 8171 bnx2x_set_client_config(bp);
a2fbb9ea 8172}
34f80b04 8173
a2fbb9ea
ET
8174#endif
8175
8176#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8177static void poll_bnx2x(struct net_device *dev)
8178{
8179 struct bnx2x *bp = netdev_priv(dev);
8180
8181 disable_irq(bp->pdev->irq);
8182 bnx2x_interrupt(bp->pdev->irq, dev);
8183 enable_irq(bp->pdev->irq);
8184}
8185#endif
8186
34f80b04
EG
8187static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8188 struct net_device *dev)
a2fbb9ea
ET
8189{
8190 struct bnx2x *bp;
8191 int rc;
8192
8193 SET_NETDEV_DEV(dev, &pdev->dev);
8194 bp = netdev_priv(dev);
8195
34f80b04
EG
8196 bp->dev = dev;
8197 bp->pdev = pdev;
a2fbb9ea 8198 bp->flags = 0;
34f80b04 8199 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
8200
8201 rc = pci_enable_device(pdev);
8202 if (rc) {
8203 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
8204 goto err_out;
8205 }
8206
8207 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8208 printk(KERN_ERR PFX "Cannot find PCI device base address,"
8209 " aborting\n");
8210 rc = -ENODEV;
8211 goto err_out_disable;
8212 }
8213
8214 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8215 printk(KERN_ERR PFX "Cannot find second PCI device"
8216 " base address, aborting\n");
8217 rc = -ENODEV;
8218 goto err_out_disable;
8219 }
8220
34f80b04
EG
8221 if (atomic_read(&pdev->enable_cnt) == 1) {
8222 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8223 if (rc) {
8224 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
8225 " aborting\n");
8226 goto err_out_disable;
8227 }
a2fbb9ea 8228
34f80b04
EG
8229 pci_set_master(pdev);
8230 pci_save_state(pdev);
8231 }
a2fbb9ea
ET
8232
8233 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8234 if (bp->pm_cap == 0) {
8235 printk(KERN_ERR PFX "Cannot find power management"
8236 " capability, aborting\n");
8237 rc = -EIO;
8238 goto err_out_release;
8239 }
8240
8241 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8242 if (bp->pcie_cap == 0) {
8243 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
8244 " aborting\n");
8245 rc = -EIO;
8246 goto err_out_release;
8247 }
8248
8249 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
8250 bp->flags |= USING_DAC_FLAG;
8251 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
8252 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
8253 " failed, aborting\n");
8254 rc = -EIO;
8255 goto err_out_release;
8256 }
8257
8258 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
8259 printk(KERN_ERR PFX "System does not support DMA,"
8260 " aborting\n");
8261 rc = -EIO;
8262 goto err_out_release;
8263 }
8264
34f80b04
EG
8265 dev->mem_start = pci_resource_start(pdev, 0);
8266 dev->base_addr = dev->mem_start;
8267 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
8268
8269 dev->irq = pdev->irq;
8270
8271 bp->regview = ioremap_nocache(dev->base_addr,
8272 pci_resource_len(pdev, 0));
8273 if (!bp->regview) {
8274 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
8275 rc = -ENOMEM;
8276 goto err_out_release;
8277 }
8278
34f80b04
EG
8279 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8280 min_t(u64, BNX2X_DB_SIZE,
8281 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
8282 if (!bp->doorbells) {
8283 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
8284 rc = -ENOMEM;
8285 goto err_out_unmap;
8286 }
8287
8288 bnx2x_set_power_state(bp, PCI_D0);
8289
34f80b04
EG
8290 /* clean indirect addresses */
8291 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8292 PCICFG_VENDOR_ID_OFFSET);
8293 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8294 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8295 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8296 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 8297
34f80b04
EG
8298 dev->hard_start_xmit = bnx2x_start_xmit;
8299 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 8300
34f80b04
EG
8301 dev->ethtool_ops = &bnx2x_ethtool_ops;
8302 dev->open = bnx2x_open;
8303 dev->stop = bnx2x_close;
8304 dev->set_multicast_list = bnx2x_set_rx_mode;
8305 dev->set_mac_address = bnx2x_change_mac_addr;
8306 dev->do_ioctl = bnx2x_ioctl;
8307 dev->change_mtu = bnx2x_change_mtu;
8308 dev->tx_timeout = bnx2x_tx_timeout;
8309#ifdef BCM_VLAN
8310 dev->vlan_rx_register = bnx2x_vlan_rx_register;
8311#endif
8312#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8313 dev->poll_controller = poll_bnx2x;
8314#endif
8315 dev->features |= NETIF_F_SG;
8316 dev->features |= NETIF_F_HW_CSUM;
8317 if (bp->flags & USING_DAC_FLAG)
8318 dev->features |= NETIF_F_HIGHDMA;
8319#ifdef BCM_VLAN
8320 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8321#endif
8322 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
a2fbb9ea
ET
8323
8324 bp->timer_interval = HZ;
8325 bp->current_interval = (poll ? poll : HZ);
8326
a2fbb9ea
ET
8327
8328 return 0;
8329
8330err_out_unmap:
8331 if (bp->regview) {
8332 iounmap(bp->regview);
8333 bp->regview = NULL;
8334 }
a2fbb9ea
ET
8335 if (bp->doorbells) {
8336 iounmap(bp->doorbells);
8337 bp->doorbells = NULL;
8338 }
8339
8340err_out_release:
34f80b04
EG
8341 if (atomic_read(&pdev->enable_cnt) == 1)
8342 pci_release_regions(pdev);
a2fbb9ea
ET
8343
8344err_out_disable:
8345 pci_disable_device(pdev);
8346 pci_set_drvdata(pdev, NULL);
8347
8348err_out:
8349 return rc;
8350}
8351
25047950
ET
8352static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
8353{
8354 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8355
8356 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8357 return val;
8358}
8359
8360/* return value of 1=2.5GHz 2=5GHz */
8361static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
8362{
8363 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8364
8365 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8366 return val;
8367}
8368
a2fbb9ea
ET
8369static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8370 const struct pci_device_id *ent)
8371{
8372 static int version_printed;
8373 struct net_device *dev = NULL;
8374 struct bnx2x *bp;
25047950 8375 int rc;
25047950 8376 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
8377
8378 if (version_printed++ == 0)
8379 printk(KERN_INFO "%s", version);
8380
8381 /* dev zeroed in init_etherdev */
8382 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
8383 if (!dev) {
8384 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 8385 return -ENOMEM;
34f80b04 8386 }
a2fbb9ea
ET
8387
8388 netif_carrier_off(dev);
8389
8390 bp = netdev_priv(dev);
8391 bp->msglevel = debug;
8392
34f80b04 8393 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
8394 if (rc < 0) {
8395 free_netdev(dev);
8396 return rc;
8397 }
8398
a2fbb9ea
ET
8399 rc = register_netdev(dev);
8400 if (rc) {
c14423fe 8401 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 8402 goto init_one_exit;
a2fbb9ea
ET
8403 }
8404
8405 pci_set_drvdata(pdev, dev);
8406
34f80b04
EG
8407 rc = bnx2x_init_bp(bp);
8408 if (rc) {
8409 unregister_netdev(dev);
8410 goto init_one_exit;
8411 }
8412
8413 bp->common.name = board_info[ent->driver_data].name;
25047950 8414 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
8415 " IRQ %d, ", dev->name, bp->common.name,
8416 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
8417 bnx2x_get_pcie_width(bp),
8418 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
8419 dev->base_addr, bp->pdev->irq);
8420 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 8421 return 0;
34f80b04
EG
8422
8423init_one_exit:
8424 if (bp->regview)
8425 iounmap(bp->regview);
8426
8427 if (bp->doorbells)
8428 iounmap(bp->doorbells);
8429
8430 free_netdev(dev);
8431
8432 if (atomic_read(&pdev->enable_cnt) == 1)
8433 pci_release_regions(pdev);
8434
8435 pci_disable_device(pdev);
8436 pci_set_drvdata(pdev, NULL);
8437
8438 return rc;
a2fbb9ea
ET
8439}
8440
8441static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8442{
8443 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
8444 struct bnx2x *bp;
8445
8446 if (!dev) {
228241eb
ET
8447 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
8448 return;
8449 }
228241eb 8450 bp = netdev_priv(dev);
a2fbb9ea 8451
a2fbb9ea
ET
8452 unregister_netdev(dev);
8453
8454 if (bp->regview)
8455 iounmap(bp->regview);
8456
8457 if (bp->doorbells)
8458 iounmap(bp->doorbells);
8459
8460 free_netdev(dev);
34f80b04
EG
8461
8462 if (atomic_read(&pdev->enable_cnt) == 1)
8463 pci_release_regions(pdev);
8464
a2fbb9ea
ET
8465 pci_disable_device(pdev);
8466 pci_set_drvdata(pdev, NULL);
8467}
8468
8469static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
8470{
8471 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
8472 struct bnx2x *bp;
8473
34f80b04
EG
8474 if (!dev) {
8475 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
8476 return -ENODEV;
8477 }
8478 bp = netdev_priv(dev);
a2fbb9ea 8479
34f80b04 8480 rtnl_lock();
a2fbb9ea 8481
34f80b04 8482 pci_save_state(pdev);
228241eb 8483
34f80b04
EG
8484 if (!netif_running(dev)) {
8485 rtnl_unlock();
8486 return 0;
8487 }
a2fbb9ea
ET
8488
8489 netif_device_detach(dev);
a2fbb9ea 8490
34f80b04
EG
8491 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8492
a2fbb9ea 8493 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 8494
34f80b04
EG
8495 rtnl_unlock();
8496
a2fbb9ea
ET
8497 return 0;
8498}
8499
8500static int bnx2x_resume(struct pci_dev *pdev)
8501{
8502 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 8503 struct bnx2x *bp;
a2fbb9ea
ET
8504 int rc;
8505
228241eb
ET
8506 if (!dev) {
8507 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
8508 return -ENODEV;
8509 }
228241eb 8510 bp = netdev_priv(dev);
a2fbb9ea 8511
34f80b04
EG
8512 rtnl_lock();
8513
228241eb 8514 pci_restore_state(pdev);
34f80b04
EG
8515
8516 if (!netif_running(dev)) {
8517 rtnl_unlock();
8518 return 0;
8519 }
8520
a2fbb9ea
ET
8521 bnx2x_set_power_state(bp, PCI_D0);
8522 netif_device_attach(dev);
8523
34f80b04 8524 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 8525
34f80b04
EG
8526 rtnl_unlock();
8527
8528 return rc;
a2fbb9ea
ET
8529}
8530
8531static struct pci_driver bnx2x_pci_driver = {
8532 .name = DRV_MODULE_NAME,
8533 .id_table = bnx2x_pci_tbl,
8534 .probe = bnx2x_init_one,
8535 .remove = __devexit_p(bnx2x_remove_one),
8536 .suspend = bnx2x_suspend,
8537 .resume = bnx2x_resume,
8538};
8539
8540static int __init bnx2x_init(void)
8541{
8542 return pci_register_driver(&bnx2x_pci_driver);
8543}
8544
8545static void __exit bnx2x_cleanup(void)
8546{
8547 pci_unregister_driver(&bnx2x_pci_driver);
8548}
8549
8550module_init(bnx2x_init);
8551module_exit(bnx2x_cleanup);
8552