]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
bnx2x: FW (bootcode) interface fixes
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
f1410647 3 * Copyright (c) 2007-2008 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
a2fbb9ea
ET
43#endif
44#include <net/ip.h>
45#include <net/tcp.h>
46#include <net/checksum.h>
34f80b04
EG
47#include <linux/version.h>
48#include <net/ip6_checksum.h>
a2fbb9ea
ET
49#include <linux/workqueue.h>
50#include <linux/crc32.h>
34f80b04 51#include <linux/crc32c.h>
a2fbb9ea
ET
52#include <linux/prefetch.h>
53#include <linux/zlib.h>
a2fbb9ea
ET
54#include <linux/io.h>
55
56#include "bnx2x_reg.h"
57#include "bnx2x_fw_defs.h"
58#include "bnx2x_hsi.h"
c18487ee 59#include "bnx2x_link.h"
a2fbb9ea
ET
60#include "bnx2x.h"
61#include "bnx2x_init.h"
62
e35c3269
EG
63#define DRV_MODULE_VERSION "1.45.6"
64#define DRV_MODULE_RELDATE "2008/06/23"
34f80b04 65#define BNX2X_BC_VER 0x040200
a2fbb9ea 66
34f80b04
EG
67/* Time in jiffies before concluding the transmitter is hung */
68#define TX_TIMEOUT (5*HZ)
a2fbb9ea 69
53a10565 70static char version[] __devinitdata =
34f80b04 71 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
72 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73
24e3fcef 74MODULE_AUTHOR("Eliezer Tamir");
a2fbb9ea
ET
75MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76MODULE_LICENSE("GPL");
77MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 78
19680c48 79static int disable_tpa;
a2fbb9ea
ET
80static int use_inta;
81static int poll;
a2fbb9ea 82static int debug;
34f80b04 83static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea
ET
84static int use_multi;
85
19680c48 86module_param(disable_tpa, int, 0);
a2fbb9ea
ET
87module_param(use_inta, int, 0);
88module_param(poll, int, 0);
a2fbb9ea 89module_param(debug, int, 0);
19680c48 90MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
a2fbb9ea
ET
91MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
92MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 93MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea
ET
94
95#ifdef BNX2X_MULTI
96module_param(use_multi, int, 0);
97MODULE_PARM_DESC(use_multi, "use per-CPU queues");
98#endif
99
100enum bnx2x_board_type {
101 BCM57710 = 0,
34f80b04
EG
102 BCM57711 = 1,
103 BCM57711E = 2,
a2fbb9ea
ET
104};
105
34f80b04 106/* indexed by board_type, above */
53a10565 107static struct {
a2fbb9ea
ET
108 char *name;
109} board_info[] __devinitdata = {
34f80b04
EG
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
113};
114
34f80b04 115
a2fbb9ea
ET
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
123 { 0 }
124};
125
126MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128/****************************************************************************
129* General service functions
130****************************************************************************/
131
132/* used only at init
133 * locking is done by mcp
134 */
135static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136{
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
141}
142
a2fbb9ea
ET
143static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144{
145 u32 val;
146
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
151
152 return val;
153}
a2fbb9ea
ET
154
155static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160};
161
162/* copy command into DMAE command memory and set DMAE command go */
163static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164 int idx)
165{
166 u32 cmd_offset;
167 int i;
168
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
ad8d3948
EG
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
175 }
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
177}
178
ad8d3948
EG
179void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 u32 len32)
a2fbb9ea 181{
ad8d3948 182 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
184 int cnt = 200;
185
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192 return;
193 }
194
195 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
196
197 memset(dmae, 0, sizeof(struct dmae_command));
198
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202#ifdef __BIG_ENDIAN
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
204#else
205 DMAE_CMD_ENDIANITY_DW_SWAP |
206#endif
34f80b04
EG
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
213 dmae->len = len32;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 216 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 217
ad8d3948 218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
228
229 *wb_comp = 0;
230
34f80b04 231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
232
233 udelay(5);
ad8d3948
EG
234
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
238 /* adjust delay for emulation/FPGA */
239 if (CHIP_REV_IS_SLOW(bp))
240 msleep(100);
241 else
242 udelay(5);
243
244 if (!cnt) {
a2fbb9ea
ET
245 BNX2X_ERR("dmae timeout!\n");
246 break;
247 }
ad8d3948 248 cnt--;
a2fbb9ea 249 }
ad8d3948
EG
250
251 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
252}
253
c18487ee 254void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 255{
ad8d3948 256 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 257 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
258 int cnt = 200;
259
260 if (!bp->dmae_ready) {
261 u32 *data = bnx2x_sp(bp, wb_data[0]);
262 int i;
263
264 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
265 " using indirect\n", src_addr, len32);
266 for (i = 0; i < len32; i++)
267 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 return;
269 }
270
271 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
272
273 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
274 memset(dmae, 0, sizeof(struct dmae_command));
275
276 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
277 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
278 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
279#ifdef __BIG_ENDIAN
280 DMAE_CMD_ENDIANITY_B_DW_SWAP |
281#else
282 DMAE_CMD_ENDIANITY_DW_SWAP |
283#endif
34f80b04
EG
284 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
285 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
286 dmae->src_addr_lo = src_addr >> 2;
287 dmae->src_addr_hi = 0;
288 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
289 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290 dmae->len = len32;
291 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 293 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 294
ad8d3948 295 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
296 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
297 "dst_addr [%x:%08x (%08x)]\n"
298 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
299 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
300 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
301 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
302
303 *wb_comp = 0;
304
34f80b04 305 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
306
307 udelay(5);
ad8d3948
EG
308
309 while (*wb_comp != DMAE_COMP_VAL) {
310
311 /* adjust delay for emulation/FPGA */
312 if (CHIP_REV_IS_SLOW(bp))
313 msleep(100);
314 else
315 udelay(5);
316
317 if (!cnt) {
a2fbb9ea
ET
318 BNX2X_ERR("dmae timeout!\n");
319 break;
320 }
ad8d3948 321 cnt--;
a2fbb9ea 322 }
ad8d3948 323 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
324 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
325 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
326
327 mutex_unlock(&bp->dmae_mutex);
328}
329
330/* used only for slowpath so not inlined */
331static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
332{
333 u32 wb_write[2];
334
335 wb_write[0] = val_hi;
336 wb_write[1] = val_lo;
337 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 338}
a2fbb9ea 339
ad8d3948
EG
340#ifdef USE_WB_RD
341static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
342{
343 u32 wb_data[2];
344
345 REG_RD_DMAE(bp, reg, wb_data, 2);
346
347 return HILO_U64(wb_data[0], wb_data[1]);
348}
349#endif
350
a2fbb9ea
ET
351static int bnx2x_mc_assert(struct bnx2x *bp)
352{
a2fbb9ea 353 char last_idx;
34f80b04
EG
354 int i, rc = 0;
355 u32 row0, row1, row2, row3;
356
357 /* XSTORM */
358 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
359 XSTORM_ASSERT_LIST_INDEX_OFFSET);
360 if (last_idx)
361 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
362
363 /* print the asserts */
364 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
365
366 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i));
368 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
370 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
372 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
373 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
374
375 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
376 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
377 " 0x%08x 0x%08x 0x%08x\n",
378 i, row3, row2, row1, row0);
379 rc++;
380 } else {
381 break;
382 }
383 }
384
385 /* TSTORM */
386 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
387 TSTORM_ASSERT_LIST_INDEX_OFFSET);
388 if (last_idx)
389 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
390
391 /* print the asserts */
392 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
393
394 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i));
396 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
398 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
400 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
401 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
402
403 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
404 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
405 " 0x%08x 0x%08x 0x%08x\n",
406 i, row3, row2, row1, row0);
407 rc++;
408 } else {
409 break;
410 }
411 }
412
413 /* CSTORM */
414 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
415 CSTORM_ASSERT_LIST_INDEX_OFFSET);
416 if (last_idx)
417 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
418
419 /* print the asserts */
420 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
421
422 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i));
424 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
426 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
428 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
429 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
430
431 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
432 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
433 " 0x%08x 0x%08x 0x%08x\n",
434 i, row3, row2, row1, row0);
435 rc++;
436 } else {
437 break;
438 }
439 }
440
441 /* USTORM */
442 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
443 USTORM_ASSERT_LIST_INDEX_OFFSET);
444 if (last_idx)
445 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
446
447 /* print the asserts */
448 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
449
450 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i));
452 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 4);
454 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 8);
456 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
457 USTORM_ASSERT_LIST_OFFSET(i) + 12);
458
459 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
460 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
461 " 0x%08x 0x%08x 0x%08x\n",
462 i, row3, row2, row1, row0);
463 rc++;
464 } else {
465 break;
a2fbb9ea
ET
466 }
467 }
34f80b04 468
a2fbb9ea
ET
469 return rc;
470}
c14423fe 471
a2fbb9ea
ET
472static void bnx2x_fw_dump(struct bnx2x *bp)
473{
474 u32 mark, offset;
475 u32 data[9];
476 int word;
477
478 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
479 mark = ((mark + 0x3) & ~0x3);
480 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
481
482 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
483 for (word = 0; word < 8; word++)
484 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
485 offset + 4*word));
486 data[8] = 0x0;
49d66772 487 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
488 }
489 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
490 for (word = 0; word < 8; word++)
491 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492 offset + 4*word));
493 data[8] = 0x0;
49d66772 494 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
495 }
496 printk("\n" KERN_ERR PFX "end of fw dump\n");
497}
498
499static void bnx2x_panic_dump(struct bnx2x *bp)
500{
501 int i;
502 u16 j, start, end;
503
504 BNX2X_ERR("begin crash dump -----------------\n");
505
506 for_each_queue(bp, i) {
507 struct bnx2x_fastpath *fp = &bp->fp[i];
508 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509
510 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04
EG
513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
7a9b2557
VZ
515 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
516 " rx_sge_prod(%x) last_max_sge(%x)\n",
34f80b04 517 fp->rx_comp_prod, fp->rx_comp_cons,
7a9b2557
VZ
518 le16_to_cpu(*fp->rx_cons_sb),
519 le16_to_cpu(*fp->rx_bd_cons_sb),
520 fp->rx_sge_prod, fp->last_max_sge);
34f80b04 521 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
7a9b2557 522 " bd data(%x,%x) rx_alloc_failed(%lx)\n",
34f80b04 523 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
7a9b2557 524 hw_prods->bds_prod, fp->rx_alloc_failed);
a2fbb9ea
ET
525
526 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
527 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
528 for (j = start; j < end; j++) {
529 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
530
531 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
532 sw_bd->skb, sw_bd->first_bd);
533 }
534
535 start = TX_BD(fp->tx_bd_cons - 10);
536 end = TX_BD(fp->tx_bd_cons + 254);
537 for (j = start; j < end; j++) {
538 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
539
540 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
541 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
542 }
543
544 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
545 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
546 for (j = start; j < end; j++) {
547 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
548 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
549
550 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 551 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
552 }
553
7a9b2557
VZ
554 start = 0;
555 end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
556 for (j = start; j < end; j++) {
557 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
558 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
559
560 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
561 j, rx_sge[1], rx_sge[0], sw_page->page);
562 }
563
a2fbb9ea
ET
564 start = RCQ_BD(fp->rx_comp_cons - 10);
565 end = RCQ_BD(fp->rx_comp_cons + 503);
566 for (j = start; j < end; j++) {
567 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
568
569 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
570 j, cqe[0], cqe[1], cqe[2], cqe[3]);
571 }
572 }
573
49d66772
ET
574 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
575 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 576 " spq_prod_idx(%u)\n",
49d66772 577 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
578 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
579
34f80b04 580 bnx2x_fw_dump(bp);
a2fbb9ea
ET
581 bnx2x_mc_assert(bp);
582 BNX2X_ERR("end crash dump -----------------\n");
583
bb2a0f7a
YG
584 bp->stats_state = STATS_STATE_DISABLED;
585 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
a2fbb9ea
ET
586}
587
615f8fd9 588static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 589{
34f80b04 590 int port = BP_PORT(bp);
a2fbb9ea
ET
591 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592 u32 val = REG_RD(bp, addr);
593 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
594
595 if (msix) {
596 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 } else {
600 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 601 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
602 HC_CONFIG_0_REG_INT_LINE_EN_0 |
603 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 604
615f8fd9
ET
605 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
606 val, port, addr, msix);
607
608 REG_WR(bp, addr, val);
609
a2fbb9ea
ET
610 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
611 }
612
615f8fd9 613 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
a2fbb9ea
ET
614 val, port, addr, msix);
615
616 REG_WR(bp, addr, val);
34f80b04
EG
617
618 if (CHIP_IS_E1H(bp)) {
619 /* init leading/trailing edge */
620 if (IS_E1HMF(bp)) {
621 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 if (bp->port.pmf)
623 /* enable nig attention */
624 val |= 0x0100;
625 } else
626 val = 0xffff;
627
628 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
630 }
a2fbb9ea
ET
631}
632
615f8fd9 633static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 634{
34f80b04 635 int port = BP_PORT(bp);
a2fbb9ea
ET
636 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637 u32 val = REG_RD(bp, addr);
638
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641 HC_CONFIG_0_REG_INT_LINE_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643
644 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
645 val, port, addr);
646
647 REG_WR(bp, addr, val);
648 if (REG_RD(bp, addr) != val)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
650}
651
615f8fd9 652static void bnx2x_int_disable_sync(struct bnx2x *bp)
a2fbb9ea 653{
a2fbb9ea
ET
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int i;
656
34f80b04 657 /* disable interrupt handling */
a2fbb9ea 658 atomic_inc(&bp->intr_sem);
c14423fe 659 /* prevent the HW from sending interrupts */
615f8fd9 660 bnx2x_int_disable(bp);
a2fbb9ea
ET
661
662 /* make sure all ISRs are done */
663 if (msix) {
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
666
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
669 } else
670 synchronize_irq(bp->pdev->irq);
671
672 /* make sure sp_task is not running */
673 cancel_work_sync(&bp->sp_task);
a2fbb9ea
ET
674}
675
34f80b04 676/* fast path */
a2fbb9ea
ET
677
678/*
34f80b04 679 * General service functions
a2fbb9ea
ET
680 */
681
34f80b04 682static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
683 u8 storm, u16 index, u8 op, u8 update)
684{
34f80b04 685 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
686 struct igu_ack_register igu_ack;
687
688 igu_ack.status_block_index = index;
689 igu_ack.sb_id_and_flags =
34f80b04 690 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
691 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
692 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
693 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
694
34f80b04
EG
695 DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
696 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
a2fbb9ea
ET
697 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
698}
699
700static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
701{
702 struct host_status_block *fpsb = fp->status_blk;
703 u16 rc = 0;
704
705 barrier(); /* status block is written to by the chip */
706 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
707 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
708 rc |= 1;
709 }
710 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
711 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
712 rc |= 2;
713 }
714 return rc;
715}
716
717static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
718{
719 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
720
721 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
722 rx_cons_sb++;
723
34f80b04
EG
724 if ((fp->rx_comp_cons != rx_cons_sb) ||
725 (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
726 (fp->tx_pkt_prod != fp->tx_pkt_cons))
a2fbb9ea
ET
727 return 1;
728
729 return 0;
730}
731
732static u16 bnx2x_ack_int(struct bnx2x *bp)
733{
34f80b04 734 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
735 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
736
34f80b04
EG
737 DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
738 result, BAR_IGU_INTMEM + igu_addr);
a2fbb9ea
ET
739
740#ifdef IGU_DEBUG
741#warning IGU_DEBUG active
742 if (result == 0) {
743 BNX2X_ERR("read %x from IGU\n", result);
744 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
745 }
746#endif
747 return result;
748}
749
750
751/*
752 * fast path service functions
753 */
754
755/* free skb in the packet ring at pos idx
756 * return idx of last bd freed
757 */
758static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
759 u16 idx)
760{
761 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
762 struct eth_tx_bd *tx_bd;
763 struct sk_buff *skb = tx_buf->skb;
34f80b04 764 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
765 int nbd;
766
767 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
768 idx, tx_buf, skb);
769
770 /* unmap first bd */
771 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
772 tx_bd = &fp->tx_desc_ring[bd_idx];
773 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
774 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
775
776 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 777 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
778#ifdef BNX2X_STOP_ON_ERROR
779 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 780 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
781 bnx2x_panic();
782 }
783#endif
784
785 /* Skip a parse bd and the TSO split header bd
786 since they have no mapping */
787 if (nbd)
788 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
789
790 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
791 ETH_TX_BD_FLAGS_TCP_CSUM |
792 ETH_TX_BD_FLAGS_SW_LSO)) {
793 if (--nbd)
794 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
795 tx_bd = &fp->tx_desc_ring[bd_idx];
796 /* is this a TSO split header bd? */
797 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
798 if (--nbd)
799 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
800 }
801 }
802
803 /* now free frags */
804 while (nbd > 0) {
805
806 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
807 tx_bd = &fp->tx_desc_ring[bd_idx];
808 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
809 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
810 if (--nbd)
811 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
812 }
813
814 /* release skb */
53e5e96e 815 WARN_ON(!skb);
a2fbb9ea
ET
816 dev_kfree_skb(skb);
817 tx_buf->first_bd = 0;
818 tx_buf->skb = NULL;
819
34f80b04 820 return new_cons;
a2fbb9ea
ET
821}
822
34f80b04 823static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 824{
34f80b04
EG
825 s16 used;
826 u16 prod;
827 u16 cons;
a2fbb9ea 828
34f80b04 829 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
830 prod = fp->tx_bd_prod;
831 cons = fp->tx_bd_cons;
832
34f80b04
EG
833 /* NUM_TX_RINGS = number of "next-page" entries
834 It will be used as a threshold */
835 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 836
34f80b04 837#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
838 WARN_ON(used < 0);
839 WARN_ON(used > fp->bp->tx_ring_size);
840 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 841#endif
a2fbb9ea 842
34f80b04 843 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
844}
845
846static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
847{
848 struct bnx2x *bp = fp->bp;
849 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
850 int done = 0;
851
852#ifdef BNX2X_STOP_ON_ERROR
853 if (unlikely(bp->panic))
854 return;
855#endif
856
857 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
858 sw_cons = fp->tx_pkt_cons;
859
860 while (sw_cons != hw_cons) {
861 u16 pkt_cons;
862
863 pkt_cons = TX_BD(sw_cons);
864
865 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
866
34f80b04 867 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
868 hw_cons, sw_cons, pkt_cons);
869
34f80b04 870/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
871 rmb();
872 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
873 }
874*/
875 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
876 sw_cons++;
877 done++;
878
879 if (done == work)
880 break;
881 }
882
883 fp->tx_pkt_cons = sw_cons;
884 fp->tx_bd_cons = bd_cons;
885
886 /* Need to make the tx_cons update visible to start_xmit()
887 * before checking for netif_queue_stopped(). Without the
888 * memory barrier, there is a small possibility that start_xmit()
889 * will miss it and cause the queue to be stopped forever.
890 */
891 smp_mb();
892
893 /* TBD need a thresh? */
894 if (unlikely(netif_queue_stopped(bp->dev))) {
895
896 netif_tx_lock(bp->dev);
897
898 if (netif_queue_stopped(bp->dev) &&
899 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900 netif_wake_queue(bp->dev);
901
902 netif_tx_unlock(bp->dev);
a2fbb9ea
ET
903 }
904}
905
906static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
907 union eth_rx_cqe *rr_cqe)
908{
909 struct bnx2x *bp = fp->bp;
910 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
911 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912
34f80b04 913 DP(BNX2X_MSG_SP,
a2fbb9ea 914 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
915 FP_IDX(fp), cid, command, bp->state,
916 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
917
918 bp->spq_left++;
919
34f80b04 920 if (FP_IDX(fp)) {
a2fbb9ea
ET
921 switch (command | fp->state) {
922 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
923 BNX2X_FP_STATE_OPENING):
924 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
925 cid);
926 fp->state = BNX2X_FP_STATE_OPEN;
927 break;
928
929 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
930 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
931 cid);
932 fp->state = BNX2X_FP_STATE_HALTED;
933 break;
934
935 default:
34f80b04
EG
936 BNX2X_ERR("unexpected MC reply (%d) "
937 "fp->state is %x\n", command, fp->state);
938 break;
a2fbb9ea 939 }
34f80b04 940 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
941 return;
942 }
c14423fe 943
a2fbb9ea
ET
944 switch (command | bp->state) {
945 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
946 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
947 bp->state = BNX2X_STATE_OPEN;
948 break;
949
950 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
951 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
952 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
953 fp->state = BNX2X_FP_STATE_HALTED;
954 break;
955
a2fbb9ea 956 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 957 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 958 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
959 break;
960
961 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 962 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 963 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 964 bp->set_mac_pending = 0;
a2fbb9ea
ET
965 break;
966
49d66772 967 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 968 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
969 break;
970
a2fbb9ea 971 default:
34f80b04 972 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 973 command, bp->state);
34f80b04 974 break;
a2fbb9ea 975 }
34f80b04 976 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
977}
978
7a9b2557
VZ
979static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
980 struct bnx2x_fastpath *fp, u16 index)
981{
982 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
983 struct page *page = sw_buf->page;
984 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
985
986 /* Skip "next page" elements */
987 if (!page)
988 return;
989
990 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
991 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
992 __free_pages(page, PAGES_PER_SGE_SHIFT);
993
994 sw_buf->page = NULL;
995 sge->addr_hi = 0;
996 sge->addr_lo = 0;
997}
998
999static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1000 struct bnx2x_fastpath *fp, int last)
1001{
1002 int i;
1003
1004 for (i = 0; i < last; i++)
1005 bnx2x_free_rx_sge(bp, fp, i);
1006}
1007
1008static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1009 struct bnx2x_fastpath *fp, u16 index)
1010{
1011 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1012 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1013 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1014 dma_addr_t mapping;
1015
1016 if (unlikely(page == NULL))
1017 return -ENOMEM;
1018
1019 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1020 PCI_DMA_FROMDEVICE);
8d8bb39b 1021 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1022 __free_pages(page, PAGES_PER_SGE_SHIFT);
1023 return -ENOMEM;
1024 }
1025
1026 sw_buf->page = page;
1027 pci_unmap_addr_set(sw_buf, mapping, mapping);
1028
1029 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1030 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1031
1032 return 0;
1033}
1034
a2fbb9ea
ET
1035static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1036 struct bnx2x_fastpath *fp, u16 index)
1037{
1038 struct sk_buff *skb;
1039 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1040 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1041 dma_addr_t mapping;
1042
1043 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1044 if (unlikely(skb == NULL))
1045 return -ENOMEM;
1046
1047 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1048 PCI_DMA_FROMDEVICE);
8d8bb39b 1049 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1050 dev_kfree_skb(skb);
1051 return -ENOMEM;
1052 }
1053
1054 rx_buf->skb = skb;
1055 pci_unmap_addr_set(rx_buf, mapping, mapping);
1056
1057 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1058 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1059
1060 return 0;
1061}
1062
1063/* note that we are not allocating a new skb,
1064 * we are just moving one from cons to prod
1065 * we are not creating a new mapping,
1066 * so there is no need to check for dma_mapping_error().
1067 */
1068static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1069 struct sk_buff *skb, u16 cons, u16 prod)
1070{
1071 struct bnx2x *bp = fp->bp;
1072 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1073 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1074 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1075 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1076
1077 pci_dma_sync_single_for_device(bp->pdev,
1078 pci_unmap_addr(cons_rx_buf, mapping),
1079 bp->rx_offset + RX_COPY_THRESH,
1080 PCI_DMA_FROMDEVICE);
1081
1082 prod_rx_buf->skb = cons_rx_buf->skb;
1083 pci_unmap_addr_set(prod_rx_buf, mapping,
1084 pci_unmap_addr(cons_rx_buf, mapping));
1085 *prod_bd = *cons_bd;
1086}
1087
7a9b2557
VZ
1088static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1089 u16 idx)
1090{
1091 u16 last_max = fp->last_max_sge;
1092
1093 if (SUB_S16(idx, last_max) > 0)
1094 fp->last_max_sge = idx;
1095}
1096
1097static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1098{
1099 int i, j;
1100
1101 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1102 int idx = RX_SGE_CNT * i - 1;
1103
1104 for (j = 0; j < 2; j++) {
1105 SGE_MASK_CLEAR_BIT(fp, idx);
1106 idx--;
1107 }
1108 }
1109}
1110
1111static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1112 struct eth_fast_path_rx_cqe *fp_cqe)
1113{
1114 struct bnx2x *bp = fp->bp;
1115 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1116 le16_to_cpu(fp_cqe->len_on_bd)) >>
1117 BCM_PAGE_SHIFT;
1118 u16 last_max, last_elem, first_elem;
1119 u16 delta = 0;
1120 u16 i;
1121
1122 if (!sge_len)
1123 return;
1124
1125 /* First mark all used pages */
1126 for (i = 0; i < sge_len; i++)
1127 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1128
1129 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1130 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1131
1132 /* Here we assume that the last SGE index is the biggest */
1133 prefetch((void *)(fp->sge_mask));
1134 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1135
1136 last_max = RX_SGE(fp->last_max_sge);
1137 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1138 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1139
1140 /* If ring is not full */
1141 if (last_elem + 1 != first_elem)
1142 last_elem++;
1143
1144 /* Now update the prod */
1145 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1146 if (likely(fp->sge_mask[i]))
1147 break;
1148
1149 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1150 delta += RX_SGE_MASK_ELEM_SZ;
1151 }
1152
1153 if (delta > 0) {
1154 fp->rx_sge_prod += delta;
1155 /* clear page-end entries */
1156 bnx2x_clear_sge_mask_next_elems(fp);
1157 }
1158
1159 DP(NETIF_MSG_RX_STATUS,
1160 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1161 fp->last_max_sge, fp->rx_sge_prod);
1162}
1163
1164static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1165{
1166 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1167 memset(fp->sge_mask, 0xff,
1168 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1169
1170 /* Clear the two last indeces in the page to 1:
1171 these are the indeces that correspond to the "next" element,
1172 hence will never be indicated and should be removed from
1173 the calculations. */
1174 bnx2x_clear_sge_mask_next_elems(fp);
1175}
1176
1177static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1178 struct sk_buff *skb, u16 cons, u16 prod)
1179{
1180 struct bnx2x *bp = fp->bp;
1181 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1182 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1183 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1184 dma_addr_t mapping;
1185
1186 /* move empty skb from pool to prod and map it */
1187 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1188 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1189 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1190 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1191
1192 /* move partial skb from cons to pool (don't unmap yet) */
1193 fp->tpa_pool[queue] = *cons_rx_buf;
1194
1195 /* mark bin state as start - print error if current state != stop */
1196 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1197 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1198
1199 fp->tpa_state[queue] = BNX2X_TPA_START;
1200
1201 /* point prod_bd to new skb */
1202 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1203 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1204
1205#ifdef BNX2X_STOP_ON_ERROR
1206 fp->tpa_queue_used |= (1 << queue);
1207#ifdef __powerpc64__
1208 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1209#else
1210 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1211#endif
1212 fp->tpa_queue_used);
1213#endif
1214}
1215
1216static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1217 struct sk_buff *skb,
1218 struct eth_fast_path_rx_cqe *fp_cqe,
1219 u16 cqe_idx)
1220{
1221 struct sw_rx_page *rx_pg, old_rx_pg;
1222 struct page *sge;
1223 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1224 u32 i, frag_len, frag_size, pages;
1225 int err;
1226 int j;
1227
1228 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1229 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1230
1231 /* This is needed in order to enable forwarding support */
1232 if (frag_size)
1233 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1234 max(frag_size, (u32)len_on_bd));
1235
1236#ifdef BNX2X_STOP_ON_ERROR
1237 if (pages > 8*PAGES_PER_SGE) {
1238 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1239 pages, cqe_idx);
1240 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1241 fp_cqe->pkt_len, len_on_bd);
1242 bnx2x_panic();
1243 return -EINVAL;
1244 }
1245#endif
1246
1247 /* Run through the SGL and compose the fragmented skb */
1248 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1249 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1250
1251 /* FW gives the indices of the SGE as if the ring is an array
1252 (meaning that "next" element will consume 2 indices) */
1253 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1254 rx_pg = &fp->rx_page_ring[sge_idx];
1255 sge = rx_pg->page;
1256 old_rx_pg = *rx_pg;
1257
1258 /* If we fail to allocate a substitute page, we simply stop
1259 where we are and drop the whole packet */
1260 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1261 if (unlikely(err)) {
1262 fp->rx_alloc_failed++;
1263 return err;
1264 }
1265
1266 /* Unmap the page as we r going to pass it to the stack */
1267 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1268 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1269
1270 /* Add one frag and update the appropriate fields in the skb */
1271 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1272
1273 skb->data_len += frag_len;
1274 skb->truesize += frag_len;
1275 skb->len += frag_len;
1276
1277 frag_size -= frag_len;
1278 }
1279
1280 return 0;
1281}
1282
1283static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1284 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1285 u16 cqe_idx)
1286{
1287 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1288 struct sk_buff *skb = rx_buf->skb;
1289 /* alloc new skb */
1290 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1291
1292 /* Unmap skb in the pool anyway, as we are going to change
1293 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1294 fails. */
1295 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1296 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1297
1298 /* if alloc failed drop the packet and keep the buffer in the bin */
1299 if (likely(new_skb)) {
1300
1301 prefetch(skb);
1302 prefetch(((char *)(skb)) + 128);
1303
1304 /* else fix ip xsum and give it to the stack */
1305 /* (no need to map the new skb) */
1306#ifdef BNX2X_STOP_ON_ERROR
1307 if (pad + len > bp->rx_buf_size) {
1308 BNX2X_ERR("skb_put is about to fail... "
1309 "pad %d len %d rx_buf_size %d\n",
1310 pad, len, bp->rx_buf_size);
1311 bnx2x_panic();
1312 return;
1313 }
1314#endif
1315
1316 skb_reserve(skb, pad);
1317 skb_put(skb, len);
1318
1319 skb->protocol = eth_type_trans(skb, bp->dev);
1320 skb->ip_summed = CHECKSUM_UNNECESSARY;
1321
1322 {
1323 struct iphdr *iph;
1324
1325 iph = (struct iphdr *)skb->data;
1326 iph->check = 0;
1327 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1328 }
1329
1330 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1331 &cqe->fast_path_cqe, cqe_idx)) {
1332#ifdef BCM_VLAN
1333 if ((bp->vlgrp != NULL) &&
1334 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1335 PARSING_FLAGS_VLAN))
1336 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1337 le16_to_cpu(cqe->fast_path_cqe.
1338 vlan_tag));
1339 else
1340#endif
1341 netif_receive_skb(skb);
1342 } else {
1343 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1344 " - dropping packet!\n");
1345 dev_kfree_skb(skb);
1346 }
1347
1348 bp->dev->last_rx = jiffies;
1349
1350 /* put new skb in bin */
1351 fp->tpa_pool[queue].skb = new_skb;
1352
1353 } else {
1354 DP(NETIF_MSG_RX_STATUS,
1355 "Failed to allocate new skb - dropping packet!\n");
1356 fp->rx_alloc_failed++;
1357 }
1358
1359 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1360}
1361
1362static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1363 struct bnx2x_fastpath *fp,
1364 u16 bd_prod, u16 rx_comp_prod,
1365 u16 rx_sge_prod)
1366{
1367 struct tstorm_eth_rx_producers rx_prods = {0};
1368 int i;
1369
1370 /* Update producers */
1371 rx_prods.bd_prod = bd_prod;
1372 rx_prods.cqe_prod = rx_comp_prod;
1373 rx_prods.sge_prod = rx_sge_prod;
1374
1375 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1376 REG_WR(bp, BAR_TSTRORM_INTMEM +
1377 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1378 ((u32 *)&rx_prods)[i]);
1379
1380 DP(NETIF_MSG_RX_STATUS,
1381 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1382 bd_prod, rx_comp_prod, rx_sge_prod);
1383}
1384
a2fbb9ea
ET
1385static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1386{
1387 struct bnx2x *bp = fp->bp;
34f80b04 1388 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1389 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1390 int rx_pkt = 0;
7a9b2557 1391 u16 queue;
a2fbb9ea
ET
1392
1393#ifdef BNX2X_STOP_ON_ERROR
1394 if (unlikely(bp->panic))
1395 return 0;
1396#endif
1397
34f80b04
EG
1398 /* CQ "next element" is of the size of the regular element,
1399 that's why it's ok here */
a2fbb9ea
ET
1400 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1401 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1402 hw_comp_cons++;
1403
1404 bd_cons = fp->rx_bd_cons;
1405 bd_prod = fp->rx_bd_prod;
34f80b04 1406 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1407 sw_comp_cons = fp->rx_comp_cons;
1408 sw_comp_prod = fp->rx_comp_prod;
1409
1410 /* Memory barrier necessary as speculative reads of the rx
1411 * buffer can be ahead of the index in the status block
1412 */
1413 rmb();
1414
1415 DP(NETIF_MSG_RX_STATUS,
1416 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1417 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1418
1419 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1420 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1421 struct sk_buff *skb;
1422 union eth_rx_cqe *cqe;
34f80b04
EG
1423 u8 cqe_fp_flags;
1424 u16 len, pad;
a2fbb9ea
ET
1425
1426 comp_ring_cons = RCQ_BD(sw_comp_cons);
1427 bd_prod = RX_BD(bd_prod);
1428 bd_cons = RX_BD(bd_cons);
1429
1430 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1431 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1432
a2fbb9ea 1433 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1434 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1435 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
a2fbb9ea 1436 cqe->fast_path_cqe.rss_hash_result,
34f80b04
EG
1437 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1438 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1439
1440 /* is this a slowpath msg? */
34f80b04 1441 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1442 bnx2x_sp_event(fp, cqe);
1443 goto next_cqe;
1444
1445 /* this is an rx packet */
1446 } else {
1447 rx_buf = &fp->rx_buf_ring[bd_cons];
1448 skb = rx_buf->skb;
a2fbb9ea
ET
1449 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1450 pad = cqe->fast_path_cqe.placement_offset;
1451
7a9b2557
VZ
1452 /* If CQE is marked both TPA_START and TPA_END
1453 it is a non-TPA CQE */
1454 if ((!fp->disable_tpa) &&
1455 (TPA_TYPE(cqe_fp_flags) !=
1456 (TPA_TYPE_START | TPA_TYPE_END))) {
1457 queue = cqe->fast_path_cqe.queue_index;
1458
1459 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1460 DP(NETIF_MSG_RX_STATUS,
1461 "calling tpa_start on queue %d\n",
1462 queue);
1463
1464 bnx2x_tpa_start(fp, queue, skb,
1465 bd_cons, bd_prod);
1466 goto next_rx;
1467 }
1468
1469 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1470 DP(NETIF_MSG_RX_STATUS,
1471 "calling tpa_stop on queue %d\n",
1472 queue);
1473
1474 if (!BNX2X_RX_SUM_FIX(cqe))
1475 BNX2X_ERR("STOP on none TCP "
1476 "data\n");
1477
1478 /* This is a size of the linear data
1479 on this skb */
1480 len = le16_to_cpu(cqe->fast_path_cqe.
1481 len_on_bd);
1482 bnx2x_tpa_stop(bp, fp, queue, pad,
1483 len, cqe, comp_ring_cons);
1484#ifdef BNX2X_STOP_ON_ERROR
1485 if (bp->panic)
1486 return -EINVAL;
1487#endif
1488
1489 bnx2x_update_sge_prod(fp,
1490 &cqe->fast_path_cqe);
1491 goto next_cqe;
1492 }
1493 }
1494
a2fbb9ea
ET
1495 pci_dma_sync_single_for_device(bp->pdev,
1496 pci_unmap_addr(rx_buf, mapping),
1497 pad + RX_COPY_THRESH,
1498 PCI_DMA_FROMDEVICE);
1499 prefetch(skb);
1500 prefetch(((char *)(skb)) + 128);
1501
1502 /* is this an error packet? */
34f80b04 1503 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea
ET
1504 /* do we sometimes forward error packets anyway? */
1505 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1506 "ERROR flags %x rx packet %u\n",
1507 cqe_fp_flags, sw_comp_cons);
a2fbb9ea
ET
1508 /* TBD make sure MC counts this as a drop */
1509 goto reuse_rx;
1510 }
1511
1512 /* Since we don't have a jumbo ring
1513 * copy small packets if mtu > 1500
1514 */
1515 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1516 (len <= RX_COPY_THRESH)) {
1517 struct sk_buff *new_skb;
1518
1519 new_skb = netdev_alloc_skb(bp->dev,
1520 len + pad);
1521 if (new_skb == NULL) {
1522 DP(NETIF_MSG_RX_ERR,
34f80b04 1523 "ERROR packet dropped "
a2fbb9ea 1524 "because of alloc failure\n");
7a9b2557 1525 fp->rx_alloc_failed++;
a2fbb9ea
ET
1526 goto reuse_rx;
1527 }
1528
1529 /* aligned copy */
1530 skb_copy_from_linear_data_offset(skb, pad,
1531 new_skb->data + pad, len);
1532 skb_reserve(new_skb, pad);
1533 skb_put(new_skb, len);
1534
1535 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1536
1537 skb = new_skb;
1538
1539 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1540 pci_unmap_single(bp->pdev,
1541 pci_unmap_addr(rx_buf, mapping),
1542 bp->rx_buf_use_size,
1543 PCI_DMA_FROMDEVICE);
1544 skb_reserve(skb, pad);
1545 skb_put(skb, len);
1546
1547 } else {
1548 DP(NETIF_MSG_RX_ERR,
34f80b04 1549 "ERROR packet dropped because "
a2fbb9ea 1550 "of alloc failure\n");
7a9b2557 1551 fp->rx_alloc_failed++;
a2fbb9ea
ET
1552reuse_rx:
1553 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1554 goto next_rx;
1555 }
1556
1557 skb->protocol = eth_type_trans(skb, bp->dev);
1558
1559 skb->ip_summed = CHECKSUM_NONE;
1560 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562
1563 /* TBD do we pass bad csum packets in promisc */
1564 }
1565
1566#ifdef BCM_VLAN
34f80b04
EG
1567 if ((bp->vlgrp != NULL) &&
1568 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1569 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1570 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1571 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1572 else
1573#endif
34f80b04 1574 netif_receive_skb(skb);
a2fbb9ea
ET
1575
1576 bp->dev->last_rx = jiffies;
1577
1578next_rx:
1579 rx_buf->skb = NULL;
1580
1581 bd_cons = NEXT_RX_IDX(bd_cons);
1582 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1583 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1584 rx_pkt++;
a2fbb9ea
ET
1585next_cqe:
1586 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1587 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1588
34f80b04 1589 if (rx_pkt == budget)
a2fbb9ea
ET
1590 break;
1591 } /* while */
1592
1593 fp->rx_bd_cons = bd_cons;
34f80b04 1594 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1595 fp->rx_comp_cons = sw_comp_cons;
1596 fp->rx_comp_prod = sw_comp_prod;
1597
7a9b2557
VZ
1598 /* Update producers */
1599 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1600 fp->rx_sge_prod);
a2fbb9ea
ET
1601 mmiowb(); /* keep prod updates ordered */
1602
1603 fp->rx_pkt += rx_pkt;
1604 fp->rx_calls++;
1605
1606 return rx_pkt;
1607}
1608
1609static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1610{
1611 struct bnx2x_fastpath *fp = fp_cookie;
1612 struct bnx2x *bp = fp->bp;
1613 struct net_device *dev = bp->dev;
34f80b04 1614 int index = FP_IDX(fp);
a2fbb9ea 1615
34f80b04
EG
1616 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1617 index, FP_SB_ID(fp));
1618 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1619
1620#ifdef BNX2X_STOP_ON_ERROR
1621 if (unlikely(bp->panic))
1622 return IRQ_HANDLED;
1623#endif
1624
1625 prefetch(fp->rx_cons_sb);
1626 prefetch(fp->tx_cons_sb);
1627 prefetch(&fp->status_blk->c_status_block.status_block_index);
1628 prefetch(&fp->status_blk->u_status_block.status_block_index);
1629
1630 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
34f80b04 1631
a2fbb9ea
ET
1632 return IRQ_HANDLED;
1633}
1634
1635static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1636{
1637 struct net_device *dev = dev_instance;
1638 struct bnx2x *bp = netdev_priv(dev);
1639 u16 status = bnx2x_ack_int(bp);
34f80b04 1640 u16 mask;
a2fbb9ea 1641
34f80b04 1642 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1643 if (unlikely(status == 0)) {
1644 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1645 return IRQ_NONE;
1646 }
34f80b04 1647 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea
ET
1648
1649#ifdef BNX2X_STOP_ON_ERROR
1650 if (unlikely(bp->panic))
1651 return IRQ_HANDLED;
1652#endif
1653
34f80b04 1654 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1655 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1656 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1657 return IRQ_HANDLED;
1658 }
1659
34f80b04
EG
1660 mask = 0x2 << bp->fp[0].sb_id;
1661 if (status & mask) {
a2fbb9ea
ET
1662 struct bnx2x_fastpath *fp = &bp->fp[0];
1663
1664 prefetch(fp->rx_cons_sb);
1665 prefetch(fp->tx_cons_sb);
1666 prefetch(&fp->status_blk->c_status_block.status_block_index);
1667 prefetch(&fp->status_blk->u_status_block.status_block_index);
1668
1669 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1670
34f80b04 1671 status &= ~mask;
a2fbb9ea
ET
1672 }
1673
a2fbb9ea 1674
34f80b04 1675 if (unlikely(status & 0x1)) {
a2fbb9ea
ET
1676 schedule_work(&bp->sp_task);
1677
1678 status &= ~0x1;
1679 if (!status)
1680 return IRQ_HANDLED;
1681 }
1682
34f80b04
EG
1683 if (status)
1684 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1685 status);
a2fbb9ea 1686
c18487ee 1687 return IRQ_HANDLED;
a2fbb9ea
ET
1688}
1689
c18487ee 1690/* end of fast path */
a2fbb9ea 1691
bb2a0f7a 1692static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1693
c18487ee
YR
1694/* Link */
1695
1696/*
1697 * General service functions
1698 */
a2fbb9ea 1699
c18487ee
YR
1700static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1701{
1702 u32 lock_status;
1703 u32 resource_bit = (1 << resource);
34f80b04 1704 u8 port = BP_PORT(bp);
c18487ee 1705 int cnt;
a2fbb9ea 1706
c18487ee
YR
1707 /* Validating that the resource is within range */
1708 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1709 DP(NETIF_MSG_HW,
1710 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1711 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1712 return -EINVAL;
1713 }
a2fbb9ea 1714
c18487ee
YR
1715 /* Validating that the resource is not already taken */
1716 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1717 if (lock_status & resource_bit) {
1718 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1719 lock_status, resource_bit);
1720 return -EEXIST;
1721 }
a2fbb9ea 1722
c18487ee
YR
1723 /* Try for 1 second every 5ms */
1724 for (cnt = 0; cnt < 200; cnt++) {
1725 /* Try to acquire the lock */
1726 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1727 resource_bit);
1728 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1729 if (lock_status & resource_bit)
1730 return 0;
a2fbb9ea 1731
c18487ee 1732 msleep(5);
a2fbb9ea 1733 }
c18487ee
YR
1734 DP(NETIF_MSG_HW, "Timeout\n");
1735 return -EAGAIN;
1736}
a2fbb9ea 1737
c18487ee
YR
1738static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
34f80b04 1742 u8 port = BP_PORT(bp);
a2fbb9ea 1743
c18487ee
YR
1744 /* Validating that the resource is within range */
1745 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1746 DP(NETIF_MSG_HW,
1747 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1748 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1749 return -EINVAL;
1750 }
1751
1752 /* Validating that the resource is currently taken */
1753 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1754 if (!(lock_status & resource_bit)) {
1755 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1756 lock_status, resource_bit);
1757 return -EFAULT;
a2fbb9ea
ET
1758 }
1759
c18487ee
YR
1760 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
1761 return 0;
1762}
1763
1764/* HW Lock for shared dual port PHYs */
1765static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1766{
1767 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1768
34f80b04 1769 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1770
c18487ee
YR
1771 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1772 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1773 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1774}
a2fbb9ea 1775
c18487ee
YR
1776static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1777{
1778 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1779
c18487ee
YR
1780 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1781 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1782 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1783
34f80b04 1784 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1785}
a2fbb9ea 1786
c18487ee
YR
1787int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1788{
1789 /* The GPIO should be swapped if swap register is set and active */
1790 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1791 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1792 int gpio_shift = gpio_num +
1793 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1794 u32 gpio_mask = (1 << gpio_shift);
1795 u32 gpio_reg;
a2fbb9ea 1796
c18487ee
YR
1797 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1798 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1799 return -EINVAL;
1800 }
a2fbb9ea 1801
c18487ee
YR
1802 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1803 /* read GPIO and mask except the float bits */
1804 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1805
c18487ee
YR
1806 switch (mode) {
1807 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1808 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1809 gpio_num, gpio_shift);
1810 /* clear FLOAT and set CLR */
1811 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1812 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1813 break;
a2fbb9ea 1814
c18487ee
YR
1815 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1816 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1817 gpio_num, gpio_shift);
1818 /* clear FLOAT and set SET */
1819 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1820 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1821 break;
a2fbb9ea 1822
c18487ee
YR
1823 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1824 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1825 gpio_num, gpio_shift);
1826 /* set FLOAT */
1827 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1828 break;
a2fbb9ea 1829
c18487ee
YR
1830 default:
1831 break;
a2fbb9ea
ET
1832 }
1833
c18487ee
YR
1834 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1835 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1836
c18487ee 1837 return 0;
a2fbb9ea
ET
1838}
1839
c18487ee 1840static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1841{
c18487ee
YR
1842 u32 spio_mask = (1 << spio_num);
1843 u32 spio_reg;
a2fbb9ea 1844
c18487ee
YR
1845 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1846 (spio_num > MISC_REGISTERS_SPIO_7)) {
1847 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1848 return -EINVAL;
a2fbb9ea
ET
1849 }
1850
c18487ee
YR
1851 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1852 /* read SPIO and mask except the float bits */
1853 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1854
c18487ee
YR
1855 switch (mode) {
1856 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1857 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1858 /* clear FLOAT and set CLR */
1859 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1860 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1861 break;
a2fbb9ea 1862
c18487ee
YR
1863 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1864 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1865 /* clear FLOAT and set SET */
1866 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1867 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1868 break;
a2fbb9ea 1869
c18487ee
YR
1870 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1871 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1872 /* set FLOAT */
1873 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1874 break;
a2fbb9ea 1875
c18487ee
YR
1876 default:
1877 break;
a2fbb9ea
ET
1878 }
1879
c18487ee
YR
1880 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1881 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1882
a2fbb9ea
ET
1883 return 0;
1884}
1885
c18487ee 1886static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1887{
c18487ee
YR
1888 switch (bp->link_vars.ieee_fc) {
1889 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1890 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1891 ADVERTISED_Pause);
1892 break;
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1894 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1895 ADVERTISED_Pause);
1896 break;
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1898 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1899 break;
1900 default:
34f80b04 1901 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1902 ADVERTISED_Pause);
1903 break;
1904 }
1905}
f1410647 1906
c18487ee
YR
1907static void bnx2x_link_report(struct bnx2x *bp)
1908{
1909 if (bp->link_vars.link_up) {
1910 if (bp->state == BNX2X_STATE_OPEN)
1911 netif_carrier_on(bp->dev);
1912 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1913
c18487ee 1914 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1915
c18487ee
YR
1916 if (bp->link_vars.duplex == DUPLEX_FULL)
1917 printk("full duplex");
1918 else
1919 printk("half duplex");
f1410647 1920
c18487ee
YR
1921 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1922 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1923 printk(", receive ");
1924 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1925 printk("& transmit ");
1926 } else {
1927 printk(", transmit ");
1928 }
1929 printk("flow control ON");
1930 }
1931 printk("\n");
f1410647 1932
c18487ee
YR
1933 } else { /* link_down */
1934 netif_carrier_off(bp->dev);
1935 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1936 }
c18487ee
YR
1937}
1938
1939static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1940{
19680c48
EG
1941 if (!BP_NOMCP(bp)) {
1942 u8 rc;
a2fbb9ea 1943
19680c48
EG
1944 /* Initialize link parameters structure variables */
1945 bp->link_params.mtu = bp->dev->mtu;
a2fbb9ea 1946
19680c48
EG
1947 bnx2x_phy_hw_lock(bp);
1948 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1949 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1950
19680c48
EG
1951 if (bp->link_vars.link_up)
1952 bnx2x_link_report(bp);
a2fbb9ea 1953
19680c48 1954 bnx2x_calc_fc_adv(bp);
34f80b04 1955
19680c48
EG
1956 return rc;
1957 }
1958 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1959 return -EINVAL;
a2fbb9ea
ET
1960}
1961
c18487ee 1962static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1963{
19680c48
EG
1964 if (!BP_NOMCP(bp)) {
1965 bnx2x_phy_hw_lock(bp);
1966 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1967 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1968
19680c48
EG
1969 bnx2x_calc_fc_adv(bp);
1970 } else
1971 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 1972}
a2fbb9ea 1973
c18487ee
YR
1974static void bnx2x__link_reset(struct bnx2x *bp)
1975{
19680c48
EG
1976 if (!BP_NOMCP(bp)) {
1977 bnx2x_phy_hw_lock(bp);
1978 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1979 bnx2x_phy_hw_unlock(bp);
1980 } else
1981 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 1982}
a2fbb9ea 1983
c18487ee
YR
1984static u8 bnx2x_link_test(struct bnx2x *bp)
1985{
1986 u8 rc;
a2fbb9ea 1987
c18487ee
YR
1988 bnx2x_phy_hw_lock(bp);
1989 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1990 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1991
c18487ee
YR
1992 return rc;
1993}
a2fbb9ea 1994
34f80b04
EG
1995/* Calculates the sum of vn_min_rates.
1996 It's needed for further normalizing of the min_rates.
1997
1998 Returns:
1999 sum of vn_min_rates
2000 or
2001 0 - if all the min_rates are 0.
2002 In the later case fainess algorithm should be deactivated.
2003 If not all min_rates are zero then those that are zeroes will
2004 be set to 1.
2005 */
2006static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2007{
2008 int i, port = BP_PORT(bp);
2009 u32 wsum = 0;
2010 int all_zero = 1;
2011
2012 for (i = 0; i < E1HVN_MAX; i++) {
2013 u32 vn_cfg =
2014 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2015 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2016 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2017 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2018 /* If min rate is zero - set it to 1 */
2019 if (!vn_min_rate)
2020 vn_min_rate = DEF_MIN_RATE;
2021 else
2022 all_zero = 0;
2023
2024 wsum += vn_min_rate;
2025 }
2026 }
2027
2028 /* ... only if all min rates are zeros - disable FAIRNESS */
2029 if (all_zero)
2030 return 0;
2031
2032 return wsum;
2033}
2034
2035static void bnx2x_init_port_minmax(struct bnx2x *bp,
2036 int en_fness,
2037 u16 port_rate,
2038 struct cmng_struct_per_port *m_cmng_port)
2039{
2040 u32 r_param = port_rate / 8;
2041 int port = BP_PORT(bp);
2042 int i;
2043
2044 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2045
2046 /* Enable minmax only if we are in e1hmf mode */
2047 if (IS_E1HMF(bp)) {
2048 u32 fair_periodic_timeout_usec;
2049 u32 t_fair;
2050
2051 /* Enable rate shaping and fairness */
2052 m_cmng_port->flags.cmng_vn_enable = 1;
2053 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2054 m_cmng_port->flags.rate_shaping_enable = 1;
2055
2056 if (!en_fness)
2057 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2058 " fairness will be disabled\n");
2059
2060 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2061 m_cmng_port->rs_vars.rs_periodic_timeout =
2062 RS_PERIODIC_TIMEOUT_USEC / 4;
2063
2064 /* this is the threshold below which no timer arming will occur
2065 1.25 coefficient is for the threshold to be a little bigger
2066 than the real time, to compensate for timer in-accuracy */
2067 m_cmng_port->rs_vars.rs_threshold =
2068 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2069
2070 /* resolution of fairness timer */
2071 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2072 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2073 t_fair = T_FAIR_COEF / port_rate;
2074
2075 /* this is the threshold below which we won't arm
2076 the timer anymore */
2077 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2078
2079 /* we multiply by 1e3/8 to get bytes/msec.
2080 We don't want the credits to pass a credit
2081 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2082 m_cmng_port->fair_vars.upper_bound =
2083 r_param * t_fair * FAIR_MEM;
2084 /* since each tick is 4 usec */
2085 m_cmng_port->fair_vars.fairness_timeout =
2086 fair_periodic_timeout_usec / 4;
2087
2088 } else {
2089 /* Disable rate shaping and fairness */
2090 m_cmng_port->flags.cmng_vn_enable = 0;
2091 m_cmng_port->flags.fairness_enable = 0;
2092 m_cmng_port->flags.rate_shaping_enable = 0;
2093
2094 DP(NETIF_MSG_IFUP,
2095 "Single function mode minmax will be disabled\n");
2096 }
2097
2098 /* Store it to internal memory */
2099 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2100 REG_WR(bp, BAR_XSTRORM_INTMEM +
2101 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2102 ((u32 *)(m_cmng_port))[i]);
2103}
2104
2105static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2106 u32 wsum, u16 port_rate,
2107 struct cmng_struct_per_port *m_cmng_port)
2108{
2109 struct rate_shaping_vars_per_vn m_rs_vn;
2110 struct fairness_vars_per_vn m_fair_vn;
2111 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2112 u16 vn_min_rate, vn_max_rate;
2113 int i;
2114
2115 /* If function is hidden - set min and max to zeroes */
2116 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2117 vn_min_rate = 0;
2118 vn_max_rate = 0;
2119
2120 } else {
2121 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2122 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2123 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2124 if current min rate is zero - set it to 1.
2125 This is a requirment of the algorithm. */
2126 if ((vn_min_rate == 0) && wsum)
2127 vn_min_rate = DEF_MIN_RATE;
2128 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2129 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2130 }
2131
2132 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2133 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2134
2135 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2136 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2137
2138 /* global vn counter - maximal Mbps for this vn */
2139 m_rs_vn.vn_counter.rate = vn_max_rate;
2140
2141 /* quota - number of bytes transmitted in this period */
2142 m_rs_vn.vn_counter.quota =
2143 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2144
2145#ifdef BNX2X_PER_PROT_QOS
2146 /* per protocol counter */
2147 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2148 /* maximal Mbps for this protocol */
2149 m_rs_vn.protocol_counters[protocol].rate =
2150 protocol_max_rate[protocol];
2151 /* the quota in each timer period -
2152 number of bytes transmitted in this period */
2153 m_rs_vn.protocol_counters[protocol].quota =
2154 (u32)(rs_periodic_timeout_usec *
2155 ((double)m_rs_vn.
2156 protocol_counters[protocol].rate/8));
2157 }
2158#endif
2159
2160 if (wsum) {
2161 /* credit for each period of the fairness algorithm:
2162 number of bytes in T_FAIR (the vn share the port rate).
2163 wsum should not be larger than 10000, thus
2164 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2165 m_fair_vn.vn_credit_delta =
2166 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2167 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2168 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2169 m_fair_vn.vn_credit_delta);
2170 }
2171
2172#ifdef BNX2X_PER_PROT_QOS
2173 do {
2174 u32 protocolWeightSum = 0;
2175
2176 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2177 protocolWeightSum +=
2178 drvInit.protocol_min_rate[protocol];
2179 /* per protocol counter -
2180 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2181 if (protocolWeightSum > 0) {
2182 for (protocol = 0;
2183 protocol < NUM_OF_PROTOCOLS; protocol++)
2184 /* credit for each period of the
2185 fairness algorithm - number of bytes in
2186 T_FAIR (the protocol share the vn rate) */
2187 m_fair_vn.protocol_credit_delta[protocol] =
2188 (u32)((vn_min_rate / 8) * t_fair *
2189 protocol_min_rate / protocolWeightSum);
2190 }
2191 } while (0);
2192#endif
2193
2194 /* Store it to internal memory */
2195 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2196 REG_WR(bp, BAR_XSTRORM_INTMEM +
2197 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2198 ((u32 *)(&m_rs_vn))[i]);
2199
2200 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2201 REG_WR(bp, BAR_XSTRORM_INTMEM +
2202 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2203 ((u32 *)(&m_fair_vn))[i]);
2204}
2205
c18487ee
YR
2206/* This function is called upon link interrupt */
2207static void bnx2x_link_attn(struct bnx2x *bp)
2208{
34f80b04
EG
2209 int vn;
2210
bb2a0f7a
YG
2211 /* Make sure that we are synced with the current statistics */
2212 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2213
c18487ee
YR
2214 bnx2x_phy_hw_lock(bp);
2215 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2216 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 2217
bb2a0f7a
YG
2218 if (bp->link_vars.link_up) {
2219
2220 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2221 struct host_port_stats *pstats;
2222
2223 pstats = bnx2x_sp(bp, port_stats);
2224 /* reset old bmac stats */
2225 memset(&(pstats->mac_stx[0]), 0,
2226 sizeof(struct mac_stx));
2227 }
2228 if ((bp->state == BNX2X_STATE_OPEN) ||
2229 (bp->state == BNX2X_STATE_DISABLED))
2230 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2231 }
2232
c18487ee
YR
2233 /* indicate link status */
2234 bnx2x_link_report(bp);
34f80b04
EG
2235
2236 if (IS_E1HMF(bp)) {
2237 int func;
2238
2239 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2240 if (vn == BP_E1HVN(bp))
2241 continue;
2242
2243 func = ((vn << 1) | BP_PORT(bp));
2244
2245 /* Set the attention towards other drivers
2246 on the same port */
2247 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2248 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2249 }
2250 }
2251
2252 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2253 struct cmng_struct_per_port m_cmng_port;
2254 u32 wsum;
2255 int port = BP_PORT(bp);
2256
2257 /* Init RATE SHAPING and FAIRNESS contexts */
2258 wsum = bnx2x_calc_vn_wsum(bp);
2259 bnx2x_init_port_minmax(bp, (int)wsum,
2260 bp->link_vars.line_speed,
2261 &m_cmng_port);
2262 if (IS_E1HMF(bp))
2263 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2264 bnx2x_init_vn_minmax(bp, 2*vn + port,
2265 wsum, bp->link_vars.line_speed,
2266 &m_cmng_port);
2267 }
c18487ee 2268}
a2fbb9ea 2269
c18487ee
YR
2270static void bnx2x__link_status_update(struct bnx2x *bp)
2271{
2272 if (bp->state != BNX2X_STATE_OPEN)
2273 return;
a2fbb9ea 2274
c18487ee 2275 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2276
bb2a0f7a
YG
2277 if (bp->link_vars.link_up)
2278 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2279 else
2280 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2281
c18487ee
YR
2282 /* indicate link status */
2283 bnx2x_link_report(bp);
a2fbb9ea 2284}
a2fbb9ea 2285
34f80b04
EG
2286static void bnx2x_pmf_update(struct bnx2x *bp)
2287{
2288 int port = BP_PORT(bp);
2289 u32 val;
2290
2291 bp->port.pmf = 1;
2292 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2293
2294 /* enable nig attention */
2295 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2296 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2297 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2298
2299 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2300}
2301
c18487ee 2302/* end of Link */
a2fbb9ea
ET
2303
2304/* slow path */
2305
2306/*
2307 * General service functions
2308 */
2309
2310/* the slow path queue is odd since completions arrive on the fastpath ring */
2311static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2312 u32 data_hi, u32 data_lo, int common)
2313{
34f80b04 2314 int func = BP_FUNC(bp);
a2fbb9ea 2315
34f80b04
EG
2316 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2317 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2318 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2319 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2320 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2321
2322#ifdef BNX2X_STOP_ON_ERROR
2323 if (unlikely(bp->panic))
2324 return -EIO;
2325#endif
2326
34f80b04 2327 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2328
2329 if (!bp->spq_left) {
2330 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2331 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2332 bnx2x_panic();
2333 return -EBUSY;
2334 }
f1410647 2335
a2fbb9ea
ET
2336 /* CID needs port number to be encoded int it */
2337 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2338 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2339 HW_CID(bp, cid)));
2340 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2341 if (common)
2342 bp->spq_prod_bd->hdr.type |=
2343 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2344
2345 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2346 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2347
2348 bp->spq_left--;
2349
2350 if (bp->spq_prod_bd == bp->spq_last_bd) {
2351 bp->spq_prod_bd = bp->spq;
2352 bp->spq_prod_idx = 0;
2353 DP(NETIF_MSG_TIMER, "end of spq\n");
2354
2355 } else {
2356 bp->spq_prod_bd++;
2357 bp->spq_prod_idx++;
2358 }
2359
34f80b04 2360 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2361 bp->spq_prod_idx);
2362
34f80b04 2363 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2364 return 0;
2365}
2366
2367/* acquire split MCP access lock register */
2368static int bnx2x_lock_alr(struct bnx2x *bp)
2369{
a2fbb9ea 2370 u32 i, j, val;
34f80b04 2371 int rc = 0;
a2fbb9ea
ET
2372
2373 might_sleep();
2374 i = 100;
2375 for (j = 0; j < i*10; j++) {
2376 val = (1UL << 31);
2377 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2378 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2379 if (val & (1L << 31))
2380 break;
2381
2382 msleep(5);
2383 }
a2fbb9ea 2384 if (!(val & (1L << 31))) {
19680c48 2385 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2386 rc = -EBUSY;
2387 }
2388
2389 return rc;
2390}
2391
2392/* Release split MCP access lock register */
2393static void bnx2x_unlock_alr(struct bnx2x *bp)
2394{
2395 u32 val = 0;
2396
2397 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2398}
2399
2400static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2401{
2402 struct host_def_status_block *def_sb = bp->def_status_blk;
2403 u16 rc = 0;
2404
2405 barrier(); /* status block is written to by the chip */
2406
2407 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2408 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2409 rc |= 1;
2410 }
2411 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2412 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2413 rc |= 2;
2414 }
2415 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2416 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2417 rc |= 4;
2418 }
2419 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2420 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2421 rc |= 8;
2422 }
2423 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2424 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2425 rc |= 16;
2426 }
2427 return rc;
2428}
2429
2430/*
2431 * slow path service functions
2432 */
2433
2434static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2435{
34f80b04
EG
2436 int port = BP_PORT(bp);
2437 int func = BP_FUNC(bp);
2438 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
a2fbb9ea
ET
2439 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2440 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2441 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2442 NIG_REG_MASK_INTERRUPT_PORT0;
a2fbb9ea
ET
2443
2444 if (~bp->aeu_mask & (asserted & 0xff))
2445 BNX2X_ERR("IGU ERROR\n");
2446 if (bp->attn_state & asserted)
2447 BNX2X_ERR("IGU ERROR\n");
2448
2449 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2450 bp->aeu_mask, asserted);
2451 bp->aeu_mask &= ~(asserted & 0xff);
2452 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2453
2454 REG_WR(bp, aeu_addr, bp->aeu_mask);
2455
2456 bp->attn_state |= asserted;
2457
2458 if (asserted & ATTN_HARD_WIRED_MASK) {
2459 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2460
877e9aa4
ET
2461 /* save nig interrupt mask */
2462 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2463 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2464
c18487ee 2465 bnx2x_link_attn(bp);
a2fbb9ea
ET
2466
2467 /* handle unicore attn? */
2468 }
2469 if (asserted & ATTN_SW_TIMER_4_FUNC)
2470 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2471
2472 if (asserted & GPIO_2_FUNC)
2473 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2474
2475 if (asserted & GPIO_3_FUNC)
2476 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2477
2478 if (asserted & GPIO_4_FUNC)
2479 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2480
2481 if (port == 0) {
2482 if (asserted & ATTN_GENERAL_ATTN_1) {
2483 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2484 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2485 }
2486 if (asserted & ATTN_GENERAL_ATTN_2) {
2487 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2488 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2489 }
2490 if (asserted & ATTN_GENERAL_ATTN_3) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2493 }
2494 } else {
2495 if (asserted & ATTN_GENERAL_ATTN_4) {
2496 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2497 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2498 }
2499 if (asserted & ATTN_GENERAL_ATTN_5) {
2500 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2501 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2502 }
2503 if (asserted & ATTN_GENERAL_ATTN_6) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2506 }
2507 }
2508
2509 } /* if hardwired */
2510
2511 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2512 asserted, BAR_IGU_INTMEM + igu_addr);
2513 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2514
2515 /* now set back the mask */
2516 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2517 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2518}
2519
877e9aa4 2520static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2521{
34f80b04 2522 int port = BP_PORT(bp);
877e9aa4
ET
2523 int reg_offset;
2524 u32 val;
2525
34f80b04
EG
2526 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2527 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2528
34f80b04 2529 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2530
2531 val = REG_RD(bp, reg_offset);
2532 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2533 REG_WR(bp, reg_offset, val);
2534
2535 BNX2X_ERR("SPIO5 hw attention\n");
2536
34f80b04 2537 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
877e9aa4
ET
2538 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2539 /* Fan failure attention */
2540
2541 /* The PHY reset is controled by GPIO 1 */
2542 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2543 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2544 /* Low power mode is controled by GPIO 2 */
2545 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2546 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2547 /* mark the failure */
c18487ee 2548 bp->link_params.ext_phy_config &=
877e9aa4 2549 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2550 bp->link_params.ext_phy_config |=
877e9aa4
ET
2551 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2552 SHMEM_WR(bp,
2553 dev_info.port_hw_config[port].
2554 external_phy_config,
c18487ee 2555 bp->link_params.ext_phy_config);
877e9aa4
ET
2556 /* log the failure */
2557 printk(KERN_ERR PFX "Fan Failure on Network"
2558 " Controller %s has caused the driver to"
2559 " shutdown the card to prevent permanent"
2560 " damage. Please contact Dell Support for"
2561 " assistance\n", bp->dev->name);
2562 break;
2563
2564 default:
2565 break;
2566 }
2567 }
34f80b04
EG
2568
2569 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2570
2571 val = REG_RD(bp, reg_offset);
2572 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2573 REG_WR(bp, reg_offset, val);
2574
2575 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2576 (attn & HW_INTERRUT_ASSERT_SET_0));
2577 bnx2x_panic();
2578 }
877e9aa4
ET
2579}
2580
2581static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2582{
2583 u32 val;
2584
2585 if (attn & BNX2X_DOORQ_ASSERT) {
2586
2587 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2588 BNX2X_ERR("DB hw attention 0x%x\n", val);
2589 /* DORQ discard attention */
2590 if (val & 0x2)
2591 BNX2X_ERR("FATAL error from DORQ\n");
2592 }
34f80b04
EG
2593
2594 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2595
2596 int port = BP_PORT(bp);
2597 int reg_offset;
2598
2599 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2600 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2601
2602 val = REG_RD(bp, reg_offset);
2603 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2604 REG_WR(bp, reg_offset, val);
2605
2606 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2607 (attn & HW_INTERRUT_ASSERT_SET_1));
2608 bnx2x_panic();
2609 }
877e9aa4
ET
2610}
2611
2612static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2613{
2614 u32 val;
2615
2616 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2617
2618 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2619 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2620 /* CFC error attention */
2621 if (val & 0x2)
2622 BNX2X_ERR("FATAL error from CFC\n");
2623 }
2624
2625 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2626
2627 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2628 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2629 /* RQ_USDMDP_FIFO_OVERFLOW */
2630 if (val & 0x18000)
2631 BNX2X_ERR("FATAL error from PXP\n");
2632 }
34f80b04
EG
2633
2634 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2635
2636 int port = BP_PORT(bp);
2637 int reg_offset;
2638
2639 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2640 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2641
2642 val = REG_RD(bp, reg_offset);
2643 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2644 REG_WR(bp, reg_offset, val);
2645
2646 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2647 (attn & HW_INTERRUT_ASSERT_SET_2));
2648 bnx2x_panic();
2649 }
877e9aa4
ET
2650}
2651
2652static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2653{
34f80b04
EG
2654 u32 val;
2655
877e9aa4
ET
2656 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2657
34f80b04
EG
2658 if (attn & BNX2X_PMF_LINK_ASSERT) {
2659 int func = BP_FUNC(bp);
2660
2661 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2662 bnx2x__link_status_update(bp);
2663 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2664 DRV_STATUS_PMF)
2665 bnx2x_pmf_update(bp);
2666
2667 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2668
2669 BNX2X_ERR("MC assert!\n");
2670 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2671 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2672 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2673 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2674 bnx2x_panic();
2675
2676 } else if (attn & BNX2X_MCP_ASSERT) {
2677
2678 BNX2X_ERR("MCP assert!\n");
2679 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2680 bnx2x_fw_dump(bp);
877e9aa4
ET
2681
2682 } else
2683 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2684 }
2685
2686 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2687 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2688 if (attn & BNX2X_GRC_TIMEOUT) {
2689 val = CHIP_IS_E1H(bp) ?
2690 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2691 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2692 }
2693 if (attn & BNX2X_GRC_RSV) {
2694 val = CHIP_IS_E1H(bp) ?
2695 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2696 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2697 }
877e9aa4 2698 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2699 }
2700}
2701
2702static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2703{
a2fbb9ea
ET
2704 struct attn_route attn;
2705 struct attn_route group_mask;
34f80b04 2706 int port = BP_PORT(bp);
877e9aa4 2707 int index;
a2fbb9ea
ET
2708 u32 reg_addr;
2709 u32 val;
2710
2711 /* need to take HW lock because MCP or other port might also
2712 try to handle this event */
2713 bnx2x_lock_alr(bp);
2714
2715 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2716 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2717 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2718 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2719 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2720 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2721
2722 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2723 if (deasserted & (1 << index)) {
2724 group_mask = bp->attn_group[index];
2725
34f80b04
EG
2726 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2727 index, group_mask.sig[0], group_mask.sig[1],
2728 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2729
877e9aa4
ET
2730 bnx2x_attn_int_deasserted3(bp,
2731 attn.sig[3] & group_mask.sig[3]);
2732 bnx2x_attn_int_deasserted1(bp,
2733 attn.sig[1] & group_mask.sig[1]);
2734 bnx2x_attn_int_deasserted2(bp,
2735 attn.sig[2] & group_mask.sig[2]);
2736 bnx2x_attn_int_deasserted0(bp,
2737 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2738
a2fbb9ea
ET
2739 if ((attn.sig[0] & group_mask.sig[0] &
2740 HW_PRTY_ASSERT_SET_0) ||
2741 (attn.sig[1] & group_mask.sig[1] &
2742 HW_PRTY_ASSERT_SET_1) ||
2743 (attn.sig[2] & group_mask.sig[2] &
2744 HW_PRTY_ASSERT_SET_2))
877e9aa4 2745 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2746 }
2747 }
2748
2749 bnx2x_unlock_alr(bp);
2750
34f80b04 2751 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
2752
2753 val = ~deasserted;
34f80b04 2754/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
a2fbb9ea
ET
2755 val, BAR_IGU_INTMEM + reg_addr); */
2756 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2757
2758 if (bp->aeu_mask & (deasserted & 0xff))
34f80b04 2759 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea 2760 if (~bp->attn_state & deasserted)
34f80b04 2761 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea
ET
2762
2763 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2764 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2765
2766 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2767 bp->aeu_mask |= (deasserted & 0xff);
2768
2769 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2770 REG_WR(bp, reg_addr, bp->aeu_mask);
2771
2772 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2773 bp->attn_state &= ~deasserted;
2774 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2775}
2776
2777static void bnx2x_attn_int(struct bnx2x *bp)
2778{
2779 /* read local copy of bits */
2780 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2781 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2782 u32 attn_state = bp->attn_state;
2783
2784 /* look for changed bits */
2785 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2786 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2787
2788 DP(NETIF_MSG_HW,
2789 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2790 attn_bits, attn_ack, asserted, deasserted);
2791
2792 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2793 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2794
2795 /* handle bits that were raised */
2796 if (asserted)
2797 bnx2x_attn_int_asserted(bp, asserted);
2798
2799 if (deasserted)
2800 bnx2x_attn_int_deasserted(bp, deasserted);
2801}
2802
2803static void bnx2x_sp_task(struct work_struct *work)
2804{
2805 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2806 u16 status;
2807
34f80b04 2808
a2fbb9ea
ET
2809 /* Return here if interrupt is disabled */
2810 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2811 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2812 return;
2813 }
2814
2815 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2816/* if (status == 0) */
2817/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2818
34f80b04 2819 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2820
877e9aa4
ET
2821 /* HW attentions */
2822 if (status & 0x1)
a2fbb9ea 2823 bnx2x_attn_int(bp);
a2fbb9ea 2824
bb2a0f7a
YG
2825 /* CStorm events: query_stats, port delete ramrod */
2826 if (status & 0x2)
2827 bp->stats_pending = 0;
2828
a2fbb9ea
ET
2829 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2830 IGU_INT_NOP, 1);
2831 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2832 IGU_INT_NOP, 1);
2833 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2834 IGU_INT_NOP, 1);
2835 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2836 IGU_INT_NOP, 1);
2837 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2838 IGU_INT_ENABLE, 1);
877e9aa4 2839
a2fbb9ea
ET
2840}
2841
2842static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2843{
2844 struct net_device *dev = dev_instance;
2845 struct bnx2x *bp = netdev_priv(dev);
2846
2847 /* Return here if interrupt is disabled */
2848 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2849 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2850 return IRQ_HANDLED;
2851 }
2852
877e9aa4 2853 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2854
2855#ifdef BNX2X_STOP_ON_ERROR
2856 if (unlikely(bp->panic))
2857 return IRQ_HANDLED;
2858#endif
2859
2860 schedule_work(&bp->sp_task);
2861
2862 return IRQ_HANDLED;
2863}
2864
2865/* end of slow path */
2866
2867/* Statistics */
2868
2869/****************************************************************************
2870* Macros
2871****************************************************************************/
2872
a2fbb9ea
ET
2873/* sum[hi:lo] += add[hi:lo] */
2874#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2875 do { \
2876 s_lo += a_lo; \
2877 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2878 } while (0)
2879
2880/* difference = minuend - subtrahend */
2881#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2882 do { \
bb2a0f7a
YG
2883 if (m_lo < s_lo) { \
2884 /* underflow */ \
a2fbb9ea 2885 d_hi = m_hi - s_hi; \
bb2a0f7a
YG
2886 if (d_hi > 0) { \
2887 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2888 d_hi--; \
2889 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a
YG
2890 } else { \
2891 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2892 d_hi = 0; \
2893 d_lo = 0; \
2894 } \
bb2a0f7a
YG
2895 } else { \
2896 /* m_lo >= s_lo */ \
a2fbb9ea 2897 if (m_hi < s_hi) { \
bb2a0f7a
YG
2898 d_hi = 0; \
2899 d_lo = 0; \
2900 } else { \
2901 /* m_hi >= s_hi */ \
2902 d_hi = m_hi - s_hi; \
2903 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2904 } \
2905 } \
2906 } while (0)
2907
bb2a0f7a 2908#define UPDATE_STAT64(s, t) \
a2fbb9ea 2909 do { \
bb2a0f7a
YG
2910 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2911 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2912 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2913 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2914 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2915 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2916 } while (0)
2917
bb2a0f7a 2918#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2919 do { \
bb2a0f7a
YG
2920 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2921 diff.lo, new->s##_lo, old->s##_lo); \
2922 ADD_64(estats->t##_hi, diff.hi, \
2923 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2924 } while (0)
2925
2926/* sum[hi:lo] += add */
2927#define ADD_EXTEND_64(s_hi, s_lo, a) \
2928 do { \
2929 s_lo += a; \
2930 s_hi += (s_lo < a) ? 1 : 0; \
2931 } while (0)
2932
bb2a0f7a 2933#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2934 do { \
bb2a0f7a
YG
2935 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2936 pstats->mac_stx[1].s##_lo, \
2937 new->s); \
a2fbb9ea
ET
2938 } while (0)
2939
bb2a0f7a 2940#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2941 do { \
2942 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2943 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2944 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2945 } while (0)
2946
2947#define UPDATE_EXTEND_XSTAT(s, t) \
2948 do { \
2949 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2950 old_xclient->s = le32_to_cpu(xclient->s); \
2951 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2952 } while (0)
2953
2954/*
2955 * General service functions
2956 */
2957
2958static inline long bnx2x_hilo(u32 *hiref)
2959{
2960 u32 lo = *(hiref + 1);
2961#if (BITS_PER_LONG == 64)
2962 u32 hi = *hiref;
2963
2964 return HILO_U64(hi, lo);
2965#else
2966 return lo;
2967#endif
2968}
2969
2970/*
2971 * Init service functions
2972 */
2973
bb2a0f7a
YG
2974static void bnx2x_storm_stats_init(struct bnx2x *bp)
2975{
2976 int func = BP_FUNC(bp);
2977
2978 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2979 REG_WR(bp, BAR_XSTRORM_INTMEM +
2980 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2981
2982 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2983 REG_WR(bp, BAR_TSTRORM_INTMEM +
2984 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2985
2986 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2987 REG_WR(bp, BAR_CSTRORM_INTMEM +
2988 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2989
2990 REG_WR(bp, BAR_XSTRORM_INTMEM +
2991 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2992 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2993 REG_WR(bp, BAR_XSTRORM_INTMEM +
2994 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2995 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2996
2997 REG_WR(bp, BAR_TSTRORM_INTMEM +
2998 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2999 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3000 REG_WR(bp, BAR_TSTRORM_INTMEM +
3001 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3002 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3003}
3004
3005static void bnx2x_storm_stats_post(struct bnx2x *bp)
3006{
3007 if (!bp->stats_pending) {
3008 struct eth_query_ramrod_data ramrod_data = {0};
3009 int rc;
3010
3011 ramrod_data.drv_counter = bp->stats_counter++;
3012 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3013 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3014
3015 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3016 ((u32 *)&ramrod_data)[1],
3017 ((u32 *)&ramrod_data)[0], 0);
3018 if (rc == 0) {
3019 /* stats ramrod has it's own slot on the spq */
3020 bp->spq_left++;
3021 bp->stats_pending = 1;
3022 }
3023 }
3024}
3025
3026static void bnx2x_stats_init(struct bnx2x *bp)
3027{
3028 int port = BP_PORT(bp);
3029
3030 bp->executer_idx = 0;
3031 bp->stats_counter = 0;
3032
3033 /* port stats */
3034 if (!BP_NOMCP(bp))
3035 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3036 else
3037 bp->port.port_stx = 0;
3038 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3039
3040 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3041 bp->port.old_nig_stats.brb_discard =
3042 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3043 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3044 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3045 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3046 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3047
3048 /* function stats */
3049 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3050 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3051 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3052 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3053
3054 bp->stats_state = STATS_STATE_DISABLED;
3055 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3056 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3057}
3058
3059static void bnx2x_hw_stats_post(struct bnx2x *bp)
3060{
3061 struct dmae_command *dmae = &bp->stats_dmae;
3062 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3063
3064 *stats_comp = DMAE_COMP_VAL;
3065
3066 /* loader */
3067 if (bp->executer_idx) {
3068 int loader_idx = PMF_DMAE_C(bp);
3069
3070 memset(dmae, 0, sizeof(struct dmae_command));
3071
3072 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3073 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3074 DMAE_CMD_DST_RESET |
3075#ifdef __BIG_ENDIAN
3076 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3077#else
3078 DMAE_CMD_ENDIANITY_DW_SWAP |
3079#endif
3080 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3081 DMAE_CMD_PORT_0) |
3082 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3083 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3084 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3085 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3086 sizeof(struct dmae_command) *
3087 (loader_idx + 1)) >> 2;
3088 dmae->dst_addr_hi = 0;
3089 dmae->len = sizeof(struct dmae_command) >> 2;
3090 if (CHIP_IS_E1(bp))
3091 dmae->len--;
3092 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3093 dmae->comp_addr_hi = 0;
3094 dmae->comp_val = 1;
3095
3096 *stats_comp = 0;
3097 bnx2x_post_dmae(bp, dmae, loader_idx);
3098
3099 } else if (bp->func_stx) {
3100 *stats_comp = 0;
3101 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3102 }
3103}
3104
3105static int bnx2x_stats_comp(struct bnx2x *bp)
3106{
3107 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3108 int cnt = 10;
3109
3110 might_sleep();
3111 while (*stats_comp != DMAE_COMP_VAL) {
3112 msleep(1);
3113 if (!cnt) {
3114 BNX2X_ERR("timeout waiting for stats finished\n");
3115 break;
3116 }
3117 cnt--;
3118 }
3119 return 1;
3120}
3121
3122/*
3123 * Statistics service functions
3124 */
3125
3126static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3127{
3128 struct dmae_command *dmae;
3129 u32 opcode;
3130 int loader_idx = PMF_DMAE_C(bp);
3131 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3132
3133 /* sanity */
3134 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3135 BNX2X_ERR("BUG!\n");
3136 return;
3137 }
3138
3139 bp->executer_idx = 0;
3140
3141 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3142 DMAE_CMD_C_ENABLE |
3143 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3144#ifdef __BIG_ENDIAN
3145 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3146#else
3147 DMAE_CMD_ENDIANITY_DW_SWAP |
3148#endif
3149 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3150 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3151
3152 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3153 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3154 dmae->src_addr_lo = bp->port.port_stx >> 2;
3155 dmae->src_addr_hi = 0;
3156 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3157 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3158 dmae->len = DMAE_LEN32_RD_MAX;
3159 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3160 dmae->comp_addr_hi = 0;
3161 dmae->comp_val = 1;
3162
3163 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3164 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3165 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3166 dmae->src_addr_hi = 0;
7a9b2557
VZ
3167 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3168 DMAE_LEN32_RD_MAX * 4);
3169 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3170 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3171 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3172 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3173 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3174 dmae->comp_val = DMAE_COMP_VAL;
3175
3176 *stats_comp = 0;
3177 bnx2x_hw_stats_post(bp);
3178 bnx2x_stats_comp(bp);
3179}
3180
3181static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3182{
3183 struct dmae_command *dmae;
34f80b04 3184 int port = BP_PORT(bp);
bb2a0f7a 3185 int vn = BP_E1HVN(bp);
a2fbb9ea 3186 u32 opcode;
bb2a0f7a 3187 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3188 u32 mac_addr;
bb2a0f7a
YG
3189 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3190
3191 /* sanity */
3192 if (!bp->link_vars.link_up || !bp->port.pmf) {
3193 BNX2X_ERR("BUG!\n");
3194 return;
3195 }
a2fbb9ea
ET
3196
3197 bp->executer_idx = 0;
bb2a0f7a
YG
3198
3199 /* MCP */
3200 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3201 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3202 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3203#ifdef __BIG_ENDIAN
bb2a0f7a 3204 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3205#else
bb2a0f7a 3206 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3207#endif
bb2a0f7a
YG
3208 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3209 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3210
bb2a0f7a 3211 if (bp->port.port_stx) {
a2fbb9ea
ET
3212
3213 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3214 dmae->opcode = opcode;
bb2a0f7a
YG
3215 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3216 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3217 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3218 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3219 dmae->len = sizeof(struct host_port_stats) >> 2;
3220 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3221 dmae->comp_addr_hi = 0;
3222 dmae->comp_val = 1;
a2fbb9ea
ET
3223 }
3224
bb2a0f7a
YG
3225 if (bp->func_stx) {
3226
3227 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3228 dmae->opcode = opcode;
3229 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3230 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3231 dmae->dst_addr_lo = bp->func_stx >> 2;
3232 dmae->dst_addr_hi = 0;
3233 dmae->len = sizeof(struct host_func_stats) >> 2;
3234 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3235 dmae->comp_addr_hi = 0;
3236 dmae->comp_val = 1;
a2fbb9ea
ET
3237 }
3238
bb2a0f7a 3239 /* MAC */
a2fbb9ea
ET
3240 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3241 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3242 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3243#ifdef __BIG_ENDIAN
3244 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3245#else
3246 DMAE_CMD_ENDIANITY_DW_SWAP |
3247#endif
bb2a0f7a
YG
3248 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3249 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3250
c18487ee 3251 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3252
3253 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3254 NIG_REG_INGRESS_BMAC0_MEM);
3255
3256 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3257 BIGMAC_REGISTER_TX_STAT_GTBYT */
3258 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3259 dmae->opcode = opcode;
3260 dmae->src_addr_lo = (mac_addr +
3261 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3262 dmae->src_addr_hi = 0;
3263 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3264 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3265 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3266 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3267 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3268 dmae->comp_addr_hi = 0;
3269 dmae->comp_val = 1;
3270
3271 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3272 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = (mac_addr +
3276 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3277 dmae->src_addr_hi = 0;
3278 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3279 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3280 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3281 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3282 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3283 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3286 dmae->comp_val = 1;
3287
c18487ee 3288 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3289
3290 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3291
3292 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3293 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294 dmae->opcode = opcode;
3295 dmae->src_addr_lo = (mac_addr +
3296 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3297 dmae->src_addr_hi = 0;
3298 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3299 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3300 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
3303 dmae->comp_val = 1;
3304
3305 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3306 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3307 dmae->opcode = opcode;
3308 dmae->src_addr_lo = (mac_addr +
3309 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3310 dmae->src_addr_hi = 0;
3311 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3312 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3313 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3314 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3315 dmae->len = 1;
3316 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3317 dmae->comp_addr_hi = 0;
3318 dmae->comp_val = 1;
3319
3320 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = (mac_addr +
3324 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3325 dmae->src_addr_hi = 0;
3326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3327 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3328 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3329 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3330 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3331 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3332 dmae->comp_addr_hi = 0;
3333 dmae->comp_val = 1;
3334 }
3335
3336 /* NIG */
bb2a0f7a
YG
3337 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3338 dmae->opcode = opcode;
3339 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3340 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3341 dmae->src_addr_hi = 0;
3342 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3343 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3344 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3345 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3346 dmae->comp_addr_hi = 0;
3347 dmae->comp_val = 1;
3348
3349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350 dmae->opcode = opcode;
3351 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3352 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3353 dmae->src_addr_hi = 0;
3354 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3355 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3356 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3357 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3358 dmae->len = (2*sizeof(u32)) >> 2;
3359 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360 dmae->comp_addr_hi = 0;
3361 dmae->comp_val = 1;
3362
a2fbb9ea
ET
3363 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3364 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3365 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3366 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3367#ifdef __BIG_ENDIAN
3368 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3369#else
3370 DMAE_CMD_ENDIANITY_DW_SWAP |
3371#endif
bb2a0f7a
YG
3372 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3373 (vn << DMAE_CMD_E1HVN_SHIFT));
3374 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3375 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3376 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3377 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3378 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3379 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3380 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3381 dmae->len = (2*sizeof(u32)) >> 2;
3382 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3383 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3384 dmae->comp_val = DMAE_COMP_VAL;
3385
3386 *stats_comp = 0;
a2fbb9ea
ET
3387}
3388
bb2a0f7a 3389static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3390{
bb2a0f7a
YG
3391 struct dmae_command *dmae = &bp->stats_dmae;
3392 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3393
bb2a0f7a
YG
3394 /* sanity */
3395 if (!bp->func_stx) {
3396 BNX2X_ERR("BUG!\n");
3397 return;
3398 }
a2fbb9ea 3399
bb2a0f7a
YG
3400 bp->executer_idx = 0;
3401 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3402
bb2a0f7a
YG
3403 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3404 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3405 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3406#ifdef __BIG_ENDIAN
3407 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3408#else
3409 DMAE_CMD_ENDIANITY_DW_SWAP |
3410#endif
3411 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3412 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3413 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3414 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3415 dmae->dst_addr_lo = bp->func_stx >> 2;
3416 dmae->dst_addr_hi = 0;
3417 dmae->len = sizeof(struct host_func_stats) >> 2;
3418 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3419 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3420 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3421
bb2a0f7a
YG
3422 *stats_comp = 0;
3423}
a2fbb9ea 3424
bb2a0f7a
YG
3425static void bnx2x_stats_start(struct bnx2x *bp)
3426{
3427 if (bp->port.pmf)
3428 bnx2x_port_stats_init(bp);
3429
3430 else if (bp->func_stx)
3431 bnx2x_func_stats_init(bp);
3432
3433 bnx2x_hw_stats_post(bp);
3434 bnx2x_storm_stats_post(bp);
3435}
3436
3437static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3438{
3439 bnx2x_stats_comp(bp);
3440 bnx2x_stats_pmf_update(bp);
3441 bnx2x_stats_start(bp);
3442}
3443
3444static void bnx2x_stats_restart(struct bnx2x *bp)
3445{
3446 bnx2x_stats_comp(bp);
3447 bnx2x_stats_start(bp);
3448}
3449
3450static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3451{
3452 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3453 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3454 struct regpair diff;
3455
3456 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3457 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3458 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3459 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3460 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3461 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3462 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3463 UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3464 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3465 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3466 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3467 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3468 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3469 UPDATE_STAT64(tx_stat_gt127,
3470 tx_stat_etherstatspkts65octetsto127octets);
3471 UPDATE_STAT64(tx_stat_gt255,
3472 tx_stat_etherstatspkts128octetsto255octets);
3473 UPDATE_STAT64(tx_stat_gt511,
3474 tx_stat_etherstatspkts256octetsto511octets);
3475 UPDATE_STAT64(tx_stat_gt1023,
3476 tx_stat_etherstatspkts512octetsto1023octets);
3477 UPDATE_STAT64(tx_stat_gt1518,
3478 tx_stat_etherstatspkts1024octetsto1522octets);
3479 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3480 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3481 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3482 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3483 UPDATE_STAT64(tx_stat_gterr,
3484 tx_stat_dot3statsinternalmactransmiterrors);
3485 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3486}
3487
3488static void bnx2x_emac_stats_update(struct bnx2x *bp)
3489{
3490 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3491 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3492
3493 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3494 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3495 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3496 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3497 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3498 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3499 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3500 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3501 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3502 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3503 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3504 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3505 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3506 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3507 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3508 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3509 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3511 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3516 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3519 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3523 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3524}
3525
3526static int bnx2x_hw_stats_update(struct bnx2x *bp)
3527{
3528 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3529 struct nig_stats *old = &(bp->port.old_nig_stats);
3530 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3531 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3532 struct regpair diff;
3533
3534 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3535 bnx2x_bmac_stats_update(bp);
3536
3537 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3538 bnx2x_emac_stats_update(bp);
3539
3540 else { /* unreached */
3541 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3542 return -1;
3543 }
a2fbb9ea 3544
bb2a0f7a
YG
3545 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3546 new->brb_discard - old->brb_discard);
a2fbb9ea 3547
bb2a0f7a
YG
3548 UPDATE_STAT64_NIG(egress_mac_pkt0,
3549 etherstatspkts1024octetsto1522octets);
3550 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3551
bb2a0f7a 3552 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3553
bb2a0f7a
YG
3554 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3555 sizeof(struct mac_stx));
3556 estats->brb_drop_hi = pstats->brb_drop_hi;
3557 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3558
bb2a0f7a 3559 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3560
bb2a0f7a 3561 return 0;
a2fbb9ea
ET
3562}
3563
bb2a0f7a 3564static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3565{
3566 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3567 int cl_id = BP_CL_ID(bp);
3568 struct tstorm_per_port_stats *tport =
3569 &stats->tstorm_common.port_statistics;
a2fbb9ea 3570 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3571 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3572 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3573 struct xstorm_per_client_stats *xclient =
3574 &stats->xstorm_common.client_statistics[cl_id];
3575 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3576 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3577 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3578 u32 diff;
3579
bb2a0f7a
YG
3580 /* are storm stats valid? */
3581 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3582 bp->stats_counter) {
3583 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3584 " tstorm counter (%d) != stats_counter (%d)\n",
3585 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3586 return -1;
3587 }
bb2a0f7a
YG
3588 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3589 bp->stats_counter) {
3590 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3591 " xstorm counter (%d) != stats_counter (%d)\n",
3592 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3593 return -2;
3594 }
a2fbb9ea 3595
bb2a0f7a
YG
3596 fstats->total_bytes_received_hi =
3597 fstats->valid_bytes_received_hi =
a2fbb9ea 3598 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3599 fstats->total_bytes_received_lo =
3600 fstats->valid_bytes_received_lo =
a2fbb9ea 3601 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3602
3603 estats->error_bytes_received_hi =
3604 le32_to_cpu(tclient->rcv_error_bytes.hi);
3605 estats->error_bytes_received_lo =
3606 le32_to_cpu(tclient->rcv_error_bytes.lo);
3607 ADD_64(estats->error_bytes_received_hi,
3608 estats->rx_stat_ifhcinbadoctets_hi,
3609 estats->error_bytes_received_lo,
3610 estats->rx_stat_ifhcinbadoctets_lo);
3611
3612 ADD_64(fstats->total_bytes_received_hi,
3613 estats->error_bytes_received_hi,
3614 fstats->total_bytes_received_lo,
3615 estats->error_bytes_received_lo);
3616
3617 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3618 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3619 total_multicast_packets_received);
a2fbb9ea 3620 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3621 total_broadcast_packets_received);
3622
3623 fstats->total_bytes_transmitted_hi =
3624 le32_to_cpu(xclient->total_sent_bytes.hi);
3625 fstats->total_bytes_transmitted_lo =
3626 le32_to_cpu(xclient->total_sent_bytes.lo);
3627
3628 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3629 total_unicast_packets_transmitted);
3630 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3631 total_multicast_packets_transmitted);
3632 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3633 total_broadcast_packets_transmitted);
3634
3635 memcpy(estats, &(fstats->total_bytes_received_hi),
3636 sizeof(struct host_func_stats) - 2*sizeof(u32));
3637
3638 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3639 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3640 estats->brb_truncate_discard =
3641 le32_to_cpu(tport->brb_truncate_discard);
3642 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3643
3644 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3645 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3646 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3647 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3648 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3649 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3650 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3651 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3652 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3653 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3654 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3655 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3656 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3657
bb2a0f7a
YG
3658 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3659 old_tclient->packets_too_big_discard =
a2fbb9ea 3660 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3661 estats->no_buff_discard =
3662 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3663 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3664
3665 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3666 old_xclient->unicast_bytes_sent.hi =
3667 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3668 old_xclient->unicast_bytes_sent.lo =
3669 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3670 old_xclient->multicast_bytes_sent.hi =
3671 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3672 old_xclient->multicast_bytes_sent.lo =
3673 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3674 old_xclient->broadcast_bytes_sent.hi =
3675 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3676 old_xclient->broadcast_bytes_sent.lo =
3677 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3678
3679 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3680
3681 return 0;
3682}
3683
bb2a0f7a 3684static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3685{
bb2a0f7a
YG
3686 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3687 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3688 struct net_device_stats *nstats = &bp->dev->stats;
3689
3690 nstats->rx_packets =
3691 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3692 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3693 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3694
3695 nstats->tx_packets =
3696 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3697 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3698 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3699
bb2a0f7a 3700 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3701
0e39e645 3702 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3703
bb2a0f7a
YG
3704 nstats->rx_dropped = old_tclient->checksum_discard +
3705 estats->mac_discard;
a2fbb9ea
ET
3706 nstats->tx_dropped = 0;
3707
3708 nstats->multicast =
3709 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3710
bb2a0f7a
YG
3711 nstats->collisions =
3712 estats->tx_stat_dot3statssinglecollisionframes_lo +
3713 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3714 estats->tx_stat_dot3statslatecollisions_lo +
3715 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3716
bb2a0f7a
YG
3717 estats->jabber_packets_received =
3718 old_tclient->packets_too_big_discard +
3719 estats->rx_stat_dot3statsframestoolong_lo;
3720
3721 nstats->rx_length_errors =
3722 estats->rx_stat_etherstatsundersizepkts_lo +
3723 estats->jabber_packets_received;
3724 nstats->rx_over_errors = estats->brb_drop_lo +
0e39e645 3725 estats->brb_truncate_discard;
bb2a0f7a
YG
3726 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3727 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3728 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3729 nstats->rx_missed_errors = estats->xxoverflow_discard;
3730
3731 nstats->rx_errors = nstats->rx_length_errors +
3732 nstats->rx_over_errors +
3733 nstats->rx_crc_errors +
3734 nstats->rx_frame_errors +
0e39e645
ET
3735 nstats->rx_fifo_errors +
3736 nstats->rx_missed_errors;
a2fbb9ea 3737
bb2a0f7a
YG
3738 nstats->tx_aborted_errors =
3739 estats->tx_stat_dot3statslatecollisions_lo +
3740 estats->tx_stat_dot3statsexcessivecollisions_lo;
3741 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3742 nstats->tx_fifo_errors = 0;
3743 nstats->tx_heartbeat_errors = 0;
3744 nstats->tx_window_errors = 0;
3745
3746 nstats->tx_errors = nstats->tx_aborted_errors +
3747 nstats->tx_carrier_errors;
a2fbb9ea
ET
3748}
3749
bb2a0f7a 3750static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3751{
bb2a0f7a
YG
3752 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3753 int update = 0;
a2fbb9ea 3754
bb2a0f7a
YG
3755 if (*stats_comp != DMAE_COMP_VAL)
3756 return;
3757
3758 if (bp->port.pmf)
3759 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3760
bb2a0f7a 3761 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3762
bb2a0f7a
YG
3763 if (update)
3764 bnx2x_net_stats_update(bp);
a2fbb9ea 3765
bb2a0f7a
YG
3766 else {
3767 if (bp->stats_pending) {
3768 bp->stats_pending++;
3769 if (bp->stats_pending == 3) {
3770 BNX2X_ERR("stats not updated for 3 times\n");
3771 bnx2x_panic();
3772 return;
3773 }
3774 }
a2fbb9ea
ET
3775 }
3776
3777 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3778 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3779 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3780 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3781 int i;
a2fbb9ea
ET
3782
3783 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3784 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3785 " tx pkt (%lx)\n",
3786 bnx2x_tx_avail(bp->fp),
7a9b2557 3787 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3788 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3789 " rx pkt (%lx)\n",
7a9b2557
VZ
3790 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3791 bp->fp->rx_comp_cons),
3792 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea
ET
3793 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3794 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
bb2a0f7a 3795 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3796 printk(KERN_DEBUG "tstats: checksum_discard %u "
3797 "packets_too_big_discard %u no_buff_discard %u "
3798 "mac_discard %u mac_filter_discard %u "
3799 "xxovrflow_discard %u brb_truncate_discard %u "
3800 "ttl0_discard %u\n",
bb2a0f7a
YG
3801 old_tclient->checksum_discard,
3802 old_tclient->packets_too_big_discard,
3803 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3804 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3805 estats->brb_truncate_discard,
3806 old_tclient->ttl0_discard);
a2fbb9ea
ET
3807
3808 for_each_queue(bp, i) {
3809 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3810 bnx2x_fp(bp, i, tx_pkt),
3811 bnx2x_fp(bp, i, rx_pkt),
3812 bnx2x_fp(bp, i, rx_calls));
3813 }
3814 }
3815
bb2a0f7a
YG
3816 bnx2x_hw_stats_post(bp);
3817 bnx2x_storm_stats_post(bp);
3818}
a2fbb9ea 3819
bb2a0f7a
YG
3820static void bnx2x_port_stats_stop(struct bnx2x *bp)
3821{
3822 struct dmae_command *dmae;
3823 u32 opcode;
3824 int loader_idx = PMF_DMAE_C(bp);
3825 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3826
bb2a0f7a 3827 bp->executer_idx = 0;
a2fbb9ea 3828
bb2a0f7a
YG
3829 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3830 DMAE_CMD_C_ENABLE |
3831 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3832#ifdef __BIG_ENDIAN
bb2a0f7a 3833 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3834#else
bb2a0f7a 3835 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3836#endif
bb2a0f7a
YG
3837 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3838 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3839
3840 if (bp->port.port_stx) {
3841
3842 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3843 if (bp->func_stx)
3844 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3845 else
3846 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3847 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3848 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3849 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3850 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3851 dmae->len = sizeof(struct host_port_stats) >> 2;
3852 if (bp->func_stx) {
3853 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3854 dmae->comp_addr_hi = 0;
3855 dmae->comp_val = 1;
3856 } else {
3857 dmae->comp_addr_lo =
3858 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3859 dmae->comp_addr_hi =
3860 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3861 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3862
bb2a0f7a
YG
3863 *stats_comp = 0;
3864 }
a2fbb9ea
ET
3865 }
3866
bb2a0f7a
YG
3867 if (bp->func_stx) {
3868
3869 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3870 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3871 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3872 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3873 dmae->dst_addr_lo = bp->func_stx >> 2;
3874 dmae->dst_addr_hi = 0;
3875 dmae->len = sizeof(struct host_func_stats) >> 2;
3876 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3877 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878 dmae->comp_val = DMAE_COMP_VAL;
3879
3880 *stats_comp = 0;
a2fbb9ea 3881 }
bb2a0f7a
YG
3882}
3883
3884static void bnx2x_stats_stop(struct bnx2x *bp)
3885{
3886 int update = 0;
3887
3888 bnx2x_stats_comp(bp);
3889
3890 if (bp->port.pmf)
3891 update = (bnx2x_hw_stats_update(bp) == 0);
3892
3893 update |= (bnx2x_storm_stats_update(bp) == 0);
3894
3895 if (update) {
3896 bnx2x_net_stats_update(bp);
a2fbb9ea 3897
bb2a0f7a
YG
3898 if (bp->port.pmf)
3899 bnx2x_port_stats_stop(bp);
3900
3901 bnx2x_hw_stats_post(bp);
3902 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3903 }
3904}
3905
bb2a0f7a
YG
3906static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3907{
3908}
3909
3910static const struct {
3911 void (*action)(struct bnx2x *bp);
3912 enum bnx2x_stats_state next_state;
3913} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3914/* state event */
3915{
3916/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3917/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3918/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3919/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3920},
3921{
3922/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3923/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3924/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3925/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3926}
3927};
3928
3929static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3930{
3931 enum bnx2x_stats_state state = bp->stats_state;
3932
3933 bnx2x_stats_stm[state][event].action(bp);
3934 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3935
3936 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3937 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3938 state, event, bp->stats_state);
3939}
3940
a2fbb9ea
ET
3941static void bnx2x_timer(unsigned long data)
3942{
3943 struct bnx2x *bp = (struct bnx2x *) data;
3944
3945 if (!netif_running(bp->dev))
3946 return;
3947
3948 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3949 goto timer_restart;
a2fbb9ea
ET
3950
3951 if (poll) {
3952 struct bnx2x_fastpath *fp = &bp->fp[0];
3953 int rc;
3954
3955 bnx2x_tx_int(fp, 1000);
3956 rc = bnx2x_rx_int(fp, 1000);
3957 }
3958
34f80b04
EG
3959 if (!BP_NOMCP(bp)) {
3960 int func = BP_FUNC(bp);
a2fbb9ea
ET
3961 u32 drv_pulse;
3962 u32 mcp_pulse;
3963
3964 ++bp->fw_drv_pulse_wr_seq;
3965 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3966 /* TBD - add SYSTEM_TIME */
3967 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3968 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3969
34f80b04 3970 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3971 MCP_PULSE_SEQ_MASK);
3972 /* The delta between driver pulse and mcp response
3973 * should be 1 (before mcp response) or 0 (after mcp response)
3974 */
3975 if ((drv_pulse != mcp_pulse) &&
3976 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3977 /* someone lost a heartbeat... */
3978 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3979 drv_pulse, mcp_pulse);
3980 }
3981 }
3982
bb2a0f7a
YG
3983 if ((bp->state == BNX2X_STATE_OPEN) ||
3984 (bp->state == BNX2X_STATE_DISABLED))
3985 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3986
f1410647 3987timer_restart:
a2fbb9ea
ET
3988 mod_timer(&bp->timer, jiffies + bp->current_interval);
3989}
3990
3991/* end of Statistics */
3992
3993/* nic init */
3994
3995/*
3996 * nic init service functions
3997 */
3998
34f80b04 3999static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4000{
34f80b04
EG
4001 int port = BP_PORT(bp);
4002
4003 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4004 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4005 sizeof(struct ustorm_def_status_block)/4);
4006 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4007 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4008 sizeof(struct cstorm_def_status_block)/4);
4009}
4010
4011static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4012 struct host_status_block *sb, dma_addr_t mapping)
4013{
4014 int port = BP_PORT(bp);
bb2a0f7a 4015 int func = BP_FUNC(bp);
a2fbb9ea 4016 int index;
34f80b04 4017 u64 section;
a2fbb9ea
ET
4018
4019 /* USTORM */
4020 section = ((u64)mapping) + offsetof(struct host_status_block,
4021 u_status_block);
34f80b04 4022 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4023
4024 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4025 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4026 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4027 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4028 U64_HI(section));
bb2a0f7a
YG
4029 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4030 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4031
4032 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4033 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4034 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4035
4036 /* CSTORM */
4037 section = ((u64)mapping) + offsetof(struct host_status_block,
4038 c_status_block);
34f80b04 4039 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4040
4041 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4042 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4043 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4044 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4045 U64_HI(section));
7a9b2557
VZ
4046 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4047 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4048
4049 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4050 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4051 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4052
4053 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4054}
4055
4056static void bnx2x_zero_def_sb(struct bnx2x *bp)
4057{
4058 int func = BP_FUNC(bp);
a2fbb9ea 4059
34f80b04
EG
4060 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4061 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062 sizeof(struct ustorm_def_status_block)/4);
4063 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4064 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4065 sizeof(struct cstorm_def_status_block)/4);
4066 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4067 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4068 sizeof(struct xstorm_def_status_block)/4);
4069 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4070 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4071 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4072}
4073
4074static void bnx2x_init_def_sb(struct bnx2x *bp,
4075 struct host_def_status_block *def_sb,
34f80b04 4076 dma_addr_t mapping, int sb_id)
a2fbb9ea 4077{
34f80b04
EG
4078 int port = BP_PORT(bp);
4079 int func = BP_FUNC(bp);
a2fbb9ea
ET
4080 int index, val, reg_offset;
4081 u64 section;
4082
4083 /* ATTN */
4084 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4085 atten_status_block);
34f80b04 4086 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4087
49d66772
ET
4088 bp->def_att_idx = 0;
4089 bp->attn_state = 0;
4090
a2fbb9ea
ET
4091 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4092 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4093
34f80b04 4094 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4095 bp->attn_group[index].sig[0] = REG_RD(bp,
4096 reg_offset + 0x10*index);
4097 bp->attn_group[index].sig[1] = REG_RD(bp,
4098 reg_offset + 0x4 + 0x10*index);
4099 bp->attn_group[index].sig[2] = REG_RD(bp,
4100 reg_offset + 0x8 + 0x10*index);
4101 bp->attn_group[index].sig[3] = REG_RD(bp,
4102 reg_offset + 0xc + 0x10*index);
4103 }
4104
4105 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4106 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4107
4108 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4109 HC_REG_ATTN_MSG0_ADDR_L);
4110
4111 REG_WR(bp, reg_offset, U64_LO(section));
4112 REG_WR(bp, reg_offset + 4, U64_HI(section));
4113
4114 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4115
4116 val = REG_RD(bp, reg_offset);
34f80b04 4117 val |= sb_id;
a2fbb9ea
ET
4118 REG_WR(bp, reg_offset, val);
4119
4120 /* USTORM */
4121 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4122 u_def_status_block);
34f80b04 4123 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4124
49d66772
ET
4125 bp->def_u_idx = 0;
4126
a2fbb9ea 4127 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4128 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4129 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4130 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4131 U64_HI(section));
34f80b04
EG
4132 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4133 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4134 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4135 BNX2X_BTR);
4136
4137 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4138 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4139 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4140
4141 /* CSTORM */
4142 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143 c_def_status_block);
34f80b04 4144 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea 4145
49d66772
ET
4146 bp->def_c_idx = 0;
4147
a2fbb9ea 4148 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4149 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4150 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4151 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4152 U64_HI(section));
34f80b04
EG
4153 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4154 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4155 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4156 BNX2X_BTR);
4157
4158 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4159 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4160 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4161
4162 /* TSTORM */
4163 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4164 t_def_status_block);
34f80b04 4165 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea 4166
49d66772
ET
4167 bp->def_t_idx = 0;
4168
a2fbb9ea 4169 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4170 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4171 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4172 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4173 U64_HI(section));
34f80b04
EG
4174 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4175 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4176 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4177 BNX2X_BTR);
4178
4179 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4180 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4181 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4182
4183 /* XSTORM */
4184 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4185 x_def_status_block);
34f80b04 4186 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea 4187
49d66772
ET
4188 bp->def_x_idx = 0;
4189
a2fbb9ea 4190 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4191 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4192 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4193 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4194 U64_HI(section));
34f80b04
EG
4195 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4196 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4198 BNX2X_BTR);
4199
4200 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4201 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4202 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4203
bb2a0f7a
YG
4204 bp->stats_pending = 0;
4205
34f80b04 4206 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4207}
4208
4209static void bnx2x_update_coalesce(struct bnx2x *bp)
4210{
34f80b04 4211 int port = BP_PORT(bp);
a2fbb9ea
ET
4212 int i;
4213
4214 for_each_queue(bp, i) {
34f80b04 4215 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4216
4217 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4218 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4219 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4220 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4221 bp->rx_ticks/12);
a2fbb9ea 4222 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4223 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4224 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4225 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4226
4227 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4229 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4230 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4231 bp->tx_ticks/12);
a2fbb9ea 4232 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4233 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4234 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4235 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4236 }
4237}
4238
7a9b2557
VZ
4239static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4240 struct bnx2x_fastpath *fp, int last)
4241{
4242 int i;
4243
4244 for (i = 0; i < last; i++) {
4245 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4246 struct sk_buff *skb = rx_buf->skb;
4247
4248 if (skb == NULL) {
4249 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4250 continue;
4251 }
4252
4253 if (fp->tpa_state[i] == BNX2X_TPA_START)
4254 pci_unmap_single(bp->pdev,
4255 pci_unmap_addr(rx_buf, mapping),
4256 bp->rx_buf_use_size,
4257 PCI_DMA_FROMDEVICE);
4258
4259 dev_kfree_skb(skb);
4260 rx_buf->skb = NULL;
4261 }
4262}
4263
a2fbb9ea
ET
4264static void bnx2x_init_rx_rings(struct bnx2x *bp)
4265{
7a9b2557
VZ
4266 int func = BP_FUNC(bp);
4267 u16 ring_prod, cqe_ring_prod = 0;
a2fbb9ea 4268 int i, j;
a2fbb9ea
ET
4269
4270 bp->rx_buf_use_size = bp->dev->mtu;
a2fbb9ea
ET
4271 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4272 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4273
7a9b2557
VZ
4274 if (bp->flags & TPA_ENABLE_FLAG) {
4275 DP(NETIF_MSG_IFUP,
4276 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4277 bp->rx_buf_use_size, bp->rx_buf_size,
4278 bp->dev->mtu + ETH_OVREHEAD);
4279
4280 for_each_queue(bp, j) {
4281 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4282 struct bnx2x_fastpath *fp = &bp->fp[j];
4283
4284 fp->tpa_pool[i].skb =
4285 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4286 if (!fp->tpa_pool[i].skb) {
4287 BNX2X_ERR("Failed to allocate TPA "
4288 "skb pool for queue[%d] - "
4289 "disabling TPA on this "
4290 "queue!\n", j);
4291 bnx2x_free_tpa_pool(bp, fp, i);
4292 fp->disable_tpa = 1;
4293 break;
4294 }
4295 pci_unmap_addr_set((struct sw_rx_bd *)
4296 &bp->fp->tpa_pool[i],
4297 mapping, 0);
4298 fp->tpa_state[i] = BNX2X_TPA_STOP;
4299 }
4300 }
4301 }
4302
a2fbb9ea
ET
4303 for_each_queue(bp, j) {
4304 struct bnx2x_fastpath *fp = &bp->fp[j];
4305
4306 fp->rx_bd_cons = 0;
4307 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4308 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4309
4310 /* "next page" elements initialization */
4311 /* SGE ring */
4312 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4313 struct eth_rx_sge *sge;
4314
4315 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4316 sge->addr_hi =
4317 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4318 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4319 sge->addr_lo =
4320 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322 }
4323
4324 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4325
7a9b2557 4326 /* RX BD ring */
a2fbb9ea
ET
4327 for (i = 1; i <= NUM_RX_RINGS; i++) {
4328 struct eth_rx_bd *rx_bd;
4329
4330 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4331 rx_bd->addr_hi =
4332 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4333 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4334 rx_bd->addr_lo =
4335 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4336 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4337 }
4338
34f80b04 4339 /* CQ ring */
a2fbb9ea
ET
4340 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4341 struct eth_rx_cqe_next_page *nextpg;
4342
4343 nextpg = (struct eth_rx_cqe_next_page *)
4344 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4345 nextpg->addr_hi =
4346 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4347 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4348 nextpg->addr_lo =
4349 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4351 }
4352
7a9b2557
VZ
4353 /* Allocate SGEs and initialize the ring elements */
4354 for (i = 0, ring_prod = 0;
4355 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4356
7a9b2557
VZ
4357 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4358 BNX2X_ERR("was only able to allocate "
4359 "%d rx sges\n", i);
4360 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4361 /* Cleanup already allocated elements */
4362 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4363 bnx2x_free_tpa_pool(bp, fp,
4364 ETH_MAX_AGGREGATION_QUEUES_E1H);
4365 fp->disable_tpa = 1;
4366 ring_prod = 0;
4367 break;
4368 }
4369 ring_prod = NEXT_SGE_IDX(ring_prod);
4370 }
4371 fp->rx_sge_prod = ring_prod;
4372
4373 /* Allocate BDs and initialize BD ring */
4374 fp->rx_comp_cons = fp->rx_alloc_failed = 0;
4375 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4376 for (i = 0; i < bp->rx_ring_size; i++) {
4377 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4378 BNX2X_ERR("was only able to allocate "
4379 "%d rx skbs\n", i);
7a9b2557 4380 fp->rx_alloc_failed++;
a2fbb9ea
ET
4381 break;
4382 }
4383 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4384 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4385 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4386 }
4387
7a9b2557
VZ
4388 fp->rx_bd_prod = ring_prod;
4389 /* must not have more available CQEs than BDs */
4390 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4391 cqe_ring_prod);
a2fbb9ea
ET
4392 fp->rx_pkt = fp->rx_calls = 0;
4393
7a9b2557
VZ
4394 /* Warning!
4395 * this will generate an interrupt (to the TSTORM)
4396 * must only be done after chip is initialized
4397 */
4398 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4399 fp->rx_sge_prod);
a2fbb9ea
ET
4400 if (j != 0)
4401 continue;
4402
4403 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4404 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4405 U64_LO(fp->rx_comp_mapping));
4406 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4407 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4408 U64_HI(fp->rx_comp_mapping));
4409 }
4410}
4411
4412static void bnx2x_init_tx_ring(struct bnx2x *bp)
4413{
4414 int i, j;
4415
4416 for_each_queue(bp, j) {
4417 struct bnx2x_fastpath *fp = &bp->fp[j];
4418
4419 for (i = 1; i <= NUM_TX_RINGS; i++) {
4420 struct eth_tx_bd *tx_bd =
4421 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4422
4423 tx_bd->addr_hi =
4424 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4425 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4426 tx_bd->addr_lo =
4427 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4428 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4429 }
4430
4431 fp->tx_pkt_prod = 0;
4432 fp->tx_pkt_cons = 0;
4433 fp->tx_bd_prod = 0;
4434 fp->tx_bd_cons = 0;
4435 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4436 fp->tx_pkt = 0;
4437 }
4438}
4439
4440static void bnx2x_init_sp_ring(struct bnx2x *bp)
4441{
34f80b04 4442 int func = BP_FUNC(bp);
a2fbb9ea
ET
4443
4444 spin_lock_init(&bp->spq_lock);
4445
4446 bp->spq_left = MAX_SPQ_PENDING;
4447 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4448 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4449 bp->spq_prod_bd = bp->spq;
4450 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4451
34f80b04 4452 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4453 U64_LO(bp->spq_mapping));
34f80b04
EG
4454 REG_WR(bp,
4455 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4456 U64_HI(bp->spq_mapping));
4457
34f80b04 4458 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4459 bp->spq_prod_idx);
4460}
4461
4462static void bnx2x_init_context(struct bnx2x *bp)
4463{
4464 int i;
4465
4466 for_each_queue(bp, i) {
4467 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4468 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4469 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4470
4471 context->xstorm_st_context.tx_bd_page_base_hi =
4472 U64_HI(fp->tx_desc_mapping);
4473 context->xstorm_st_context.tx_bd_page_base_lo =
4474 U64_LO(fp->tx_desc_mapping);
4475 context->xstorm_st_context.db_data_addr_hi =
4476 U64_HI(fp->tx_prods_mapping);
4477 context->xstorm_st_context.db_data_addr_lo =
4478 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4479 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4480 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4481
4482 context->ustorm_st_context.common.sb_index_numbers =
4483 BNX2X_RX_SB_INDEX_NUM;
4484 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4485 context->ustorm_st_context.common.status_block_id = sb_id;
4486 context->ustorm_st_context.common.flags =
4487 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4488 context->ustorm_st_context.common.mc_alignment_size = 64;
4489 context->ustorm_st_context.common.bd_buff_size =
4490 bp->rx_buf_use_size;
4491 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4492 U64_HI(fp->rx_desc_mapping);
34f80b04 4493 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4494 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4495 if (!fp->disable_tpa) {
4496 context->ustorm_st_context.common.flags |=
4497 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4498 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4499 context->ustorm_st_context.common.sge_buff_size =
4500 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4501 context->ustorm_st_context.common.sge_page_base_hi =
4502 U64_HI(fp->rx_sge_mapping);
4503 context->ustorm_st_context.common.sge_page_base_lo =
4504 U64_LO(fp->rx_sge_mapping);
4505 }
4506
a2fbb9ea
ET
4507 context->cstorm_st_context.sb_index_number =
4508 HC_INDEX_C_ETH_TX_CQ_CONS;
34f80b04 4509 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4510
4511 context->xstorm_ag_context.cdu_reserved =
4512 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4513 CDU_REGION_NUMBER_XCM_AG,
4514 ETH_CONNECTION_TYPE);
4515 context->ustorm_ag_context.cdu_usage =
4516 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4517 CDU_REGION_NUMBER_UCM_AG,
4518 ETH_CONNECTION_TYPE);
4519 }
4520}
4521
4522static void bnx2x_init_ind_table(struct bnx2x *bp)
4523{
34f80b04 4524 int port = BP_PORT(bp);
a2fbb9ea
ET
4525 int i;
4526
4527 if (!is_multi(bp))
4528 return;
4529
34f80b04 4530 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4531 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4532 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4533 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4534 i % bp->num_queues);
4535
4536 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4537}
4538
49d66772
ET
4539static void bnx2x_set_client_config(struct bnx2x *bp)
4540{
49d66772 4541 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4542 int port = BP_PORT(bp);
4543 int i;
49d66772 4544
34f80b04 4545 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
49d66772
ET
4546 tstorm_client.statistics_counter_id = 0;
4547 tstorm_client.config_flags =
4548 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4549#ifdef BCM_VLAN
34f80b04 4550 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4551 tstorm_client.config_flags |=
4552 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4553 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4554 }
4555#endif
49d66772 4556
7a9b2557
VZ
4557 if (bp->flags & TPA_ENABLE_FLAG) {
4558 tstorm_client.max_sges_for_packet =
4559 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4560 tstorm_client.max_sges_for_packet =
4561 ((tstorm_client.max_sges_for_packet +
4562 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4563 PAGES_PER_SGE_SHIFT;
4564
4565 tstorm_client.config_flags |=
4566 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4567 }
4568
49d66772
ET
4569 for_each_queue(bp, i) {
4570 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4571 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4572 ((u32 *)&tstorm_client)[0]);
4573 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4574 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4575 ((u32 *)&tstorm_client)[1]);
4576 }
4577
34f80b04
EG
4578 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4579 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4580}
4581
a2fbb9ea
ET
4582static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4583{
a2fbb9ea 4584 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4585 int mode = bp->rx_mode;
4586 int mask = (1 << BP_L_ID(bp));
4587 int func = BP_FUNC(bp);
a2fbb9ea
ET
4588 int i;
4589
4590 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4591
4592 switch (mode) {
4593 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4594 tstorm_mac_filter.ucast_drop_all = mask;
4595 tstorm_mac_filter.mcast_drop_all = mask;
4596 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4597 break;
4598 case BNX2X_RX_MODE_NORMAL:
34f80b04 4599 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4600 break;
4601 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4602 tstorm_mac_filter.mcast_accept_all = mask;
4603 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4604 break;
4605 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4606 tstorm_mac_filter.ucast_accept_all = mask;
4607 tstorm_mac_filter.mcast_accept_all = mask;
4608 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4609 break;
4610 default:
34f80b04
EG
4611 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4612 break;
a2fbb9ea
ET
4613 }
4614
4615 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4616 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4617 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4618 ((u32 *)&tstorm_mac_filter)[i]);
4619
34f80b04 4620/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4621 ((u32 *)&tstorm_mac_filter)[i]); */
4622 }
a2fbb9ea 4623
49d66772
ET
4624 if (mode != BNX2X_RX_MODE_NONE)
4625 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4626}
4627
4628static void bnx2x_init_internal(struct bnx2x *bp)
4629{
a2fbb9ea
ET
4630 struct tstorm_eth_function_common_config tstorm_config = {0};
4631 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4632 int port = BP_PORT(bp);
4633 int func = BP_FUNC(bp);
4634 int i;
a2fbb9ea
ET
4635
4636 if (is_multi(bp)) {
4637 tstorm_config.config_flags = MULTI_FLAGS;
4638 tstorm_config.rss_result_mask = MULTI_MASK;
4639 }
4640
34f80b04
EG
4641 tstorm_config.leading_client_id = BP_L_ID(bp);
4642
a2fbb9ea 4643 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4644 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4645 (*(u32 *)&tstorm_config));
4646
34f80b04 4647/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
a2fbb9ea
ET
4648 (*(u32 *)&tstorm_config)); */
4649
c14423fe 4650 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4651 bnx2x_set_storm_rx_mode(bp);
4652
34f80b04 4653 stats_flags.collect_eth = 1;
a2fbb9ea
ET
4654
4655 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4656 ((u32 *)&stats_flags)[0]);
4657 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4658 ((u32 *)&stats_flags)[1]);
4659
4660 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4661 ((u32 *)&stats_flags)[0]);
4662 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4663 ((u32 *)&stats_flags)[1]);
4664
4665 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4666 ((u32 *)&stats_flags)[0]);
4667 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4668 ((u32 *)&stats_flags)[1]);
4669
34f80b04 4670/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
a2fbb9ea 4671 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
34f80b04
EG
4672
4673 if (CHIP_IS_E1H(bp)) {
4674 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4675 IS_E1HMF(bp));
4676 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4677 IS_E1HMF(bp));
4678 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4679 IS_E1HMF(bp));
4680 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4681 IS_E1HMF(bp));
4682
7a9b2557
VZ
4683 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4684 bp->e1hov);
34f80b04
EG
4685 }
4686
4687 /* Zero this manualy as its initialization is
4688 currently missing in the initTool */
4689 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
4690 REG_WR(bp, BAR_USTRORM_INTMEM +
4691 USTORM_AGG_DATA_OFFSET + 4*i, 0);
7a9b2557
VZ
4692
4693 for_each_queue(bp, i) {
4694 struct bnx2x_fastpath *fp = &bp->fp[i];
4695 u16 max_agg_size;
4696
4697 REG_WR(bp, BAR_USTRORM_INTMEM +
4698 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4699 U64_LO(fp->rx_comp_mapping));
4700 REG_WR(bp, BAR_USTRORM_INTMEM +
4701 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4702 U64_HI(fp->rx_comp_mapping));
4703
4704 max_agg_size = min((u32)(bp->rx_buf_use_size +
4705 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4706 (u32)0xffff);
4707 REG_WR16(bp, BAR_USTRORM_INTMEM +
4708 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4709 max_agg_size);
4710 }
a2fbb9ea
ET
4711}
4712
4713static void bnx2x_nic_init(struct bnx2x *bp)
4714{
4715 int i;
4716
4717 for_each_queue(bp, i) {
4718 struct bnx2x_fastpath *fp = &bp->fp[i];
4719
34f80b04 4720 fp->bp = bp;
a2fbb9ea 4721 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4722 fp->index = i;
34f80b04
EG
4723 fp->cl_id = BP_L_ID(bp) + i;
4724 fp->sb_id = fp->cl_id;
4725 DP(NETIF_MSG_IFUP,
4726 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4727 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4728 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4729 fp->status_blk_mapping);
a2fbb9ea
ET
4730 }
4731
4732 bnx2x_init_def_sb(bp, bp->def_status_blk,
34f80b04 4733 bp->def_status_blk_mapping, DEF_SB_ID);
a2fbb9ea
ET
4734 bnx2x_update_coalesce(bp);
4735 bnx2x_init_rx_rings(bp);
4736 bnx2x_init_tx_ring(bp);
4737 bnx2x_init_sp_ring(bp);
4738 bnx2x_init_context(bp);
4739 bnx2x_init_internal(bp);
bb2a0f7a 4740 bnx2x_storm_stats_init(bp);
a2fbb9ea 4741 bnx2x_init_ind_table(bp);
615f8fd9 4742 bnx2x_int_enable(bp);
a2fbb9ea
ET
4743}
4744
4745/* end of nic init */
4746
4747/*
4748 * gzip service functions
4749 */
4750
4751static int bnx2x_gunzip_init(struct bnx2x *bp)
4752{
4753 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4754 &bp->gunzip_mapping);
4755 if (bp->gunzip_buf == NULL)
4756 goto gunzip_nomem1;
4757
4758 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4759 if (bp->strm == NULL)
4760 goto gunzip_nomem2;
4761
4762 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4763 GFP_KERNEL);
4764 if (bp->strm->workspace == NULL)
4765 goto gunzip_nomem3;
4766
4767 return 0;
4768
4769gunzip_nomem3:
4770 kfree(bp->strm);
4771 bp->strm = NULL;
4772
4773gunzip_nomem2:
4774 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4775 bp->gunzip_mapping);
4776 bp->gunzip_buf = NULL;
4777
4778gunzip_nomem1:
4779 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4780 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4781 return -ENOMEM;
4782}
4783
4784static void bnx2x_gunzip_end(struct bnx2x *bp)
4785{
4786 kfree(bp->strm->workspace);
4787
4788 kfree(bp->strm);
4789 bp->strm = NULL;
4790
4791 if (bp->gunzip_buf) {
4792 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4793 bp->gunzip_mapping);
4794 bp->gunzip_buf = NULL;
4795 }
4796}
4797
4798static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4799{
4800 int n, rc;
4801
4802 /* check gzip header */
4803 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4804 return -EINVAL;
4805
4806 n = 10;
4807
34f80b04 4808#define FNAME 0x8
a2fbb9ea
ET
4809
4810 if (zbuf[3] & FNAME)
4811 while ((zbuf[n++] != 0) && (n < len));
4812
4813 bp->strm->next_in = zbuf + n;
4814 bp->strm->avail_in = len - n;
4815 bp->strm->next_out = bp->gunzip_buf;
4816 bp->strm->avail_out = FW_BUF_SIZE;
4817
4818 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4819 if (rc != Z_OK)
4820 return rc;
4821
4822 rc = zlib_inflate(bp->strm, Z_FINISH);
4823 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4824 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4825 bp->dev->name, bp->strm->msg);
4826
4827 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4828 if (bp->gunzip_outlen & 0x3)
4829 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4830 " gunzip_outlen (%d) not aligned\n",
4831 bp->dev->name, bp->gunzip_outlen);
4832 bp->gunzip_outlen >>= 2;
4833
4834 zlib_inflateEnd(bp->strm);
4835
4836 if (rc == Z_STREAM_END)
4837 return 0;
4838
4839 return rc;
4840}
4841
4842/* nic load/unload */
4843
4844/*
34f80b04 4845 * General service functions
a2fbb9ea
ET
4846 */
4847
4848/* send a NIG loopback debug packet */
4849static void bnx2x_lb_pckt(struct bnx2x *bp)
4850{
a2fbb9ea 4851 u32 wb_write[3];
a2fbb9ea
ET
4852
4853 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4854 wb_write[0] = 0x55555555;
4855 wb_write[1] = 0x55555555;
34f80b04 4856 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4857 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4858
4859 /* NON-IP protocol */
a2fbb9ea
ET
4860 wb_write[0] = 0x09000000;
4861 wb_write[1] = 0x55555555;
34f80b04 4862 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4863 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4864}
4865
4866/* some of the internal memories
4867 * are not directly readable from the driver
4868 * to test them we send debug packets
4869 */
4870static int bnx2x_int_mem_test(struct bnx2x *bp)
4871{
4872 int factor;
4873 int count, i;
4874 u32 val = 0;
4875
ad8d3948 4876 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4877 factor = 120;
ad8d3948
EG
4878 else if (CHIP_REV_IS_EMUL(bp))
4879 factor = 200;
4880 else
a2fbb9ea 4881 factor = 1;
a2fbb9ea
ET
4882
4883 DP(NETIF_MSG_HW, "start part1\n");
4884
4885 /* Disable inputs of parser neighbor blocks */
4886 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4887 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4888 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4889 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4890
4891 /* Write 0 to parser credits for CFC search request */
4892 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4893
4894 /* send Ethernet packet */
4895 bnx2x_lb_pckt(bp);
4896
4897 /* TODO do i reset NIG statistic? */
4898 /* Wait until NIG register shows 1 packet of size 0x10 */
4899 count = 1000 * factor;
4900 while (count) {
34f80b04 4901
a2fbb9ea
ET
4902 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4903 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4904 if (val == 0x10)
4905 break;
4906
4907 msleep(10);
4908 count--;
4909 }
4910 if (val != 0x10) {
4911 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4912 return -1;
4913 }
4914
4915 /* Wait until PRS register shows 1 packet */
4916 count = 1000 * factor;
4917 while (count) {
4918 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4919 if (val == 1)
4920 break;
4921
4922 msleep(10);
4923 count--;
4924 }
4925 if (val != 0x1) {
4926 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4927 return -2;
4928 }
4929
4930 /* Reset and init BRB, PRS */
34f80b04 4931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4932 msleep(50);
34f80b04 4933 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4934 msleep(50);
4935 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4936 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4937
4938 DP(NETIF_MSG_HW, "part2\n");
4939
4940 /* Disable inputs of parser neighbor blocks */
4941 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4942 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4943 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4944 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4945
4946 /* Write 0 to parser credits for CFC search request */
4947 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4948
4949 /* send 10 Ethernet packets */
4950 for (i = 0; i < 10; i++)
4951 bnx2x_lb_pckt(bp);
4952
4953 /* Wait until NIG register shows 10 + 1
4954 packets of size 11*0x10 = 0xb0 */
4955 count = 1000 * factor;
4956 while (count) {
34f80b04 4957
a2fbb9ea
ET
4958 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4959 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4960 if (val == 0xb0)
4961 break;
4962
4963 msleep(10);
4964 count--;
4965 }
4966 if (val != 0xb0) {
4967 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4968 return -3;
4969 }
4970
4971 /* Wait until PRS register shows 2 packets */
4972 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4973 if (val != 2)
4974 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4975
4976 /* Write 1 to parser credits for CFC search request */
4977 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4978
4979 /* Wait until PRS register shows 3 packets */
4980 msleep(10 * factor);
4981 /* Wait until NIG register shows 1 packet of size 0x10 */
4982 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4983 if (val != 3)
4984 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4985
4986 /* clear NIG EOP FIFO */
4987 for (i = 0; i < 11; i++)
4988 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4989 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4990 if (val != 1) {
4991 BNX2X_ERR("clear of NIG failed\n");
4992 return -4;
4993 }
4994
4995 /* Reset and init BRB, PRS, NIG */
4996 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4997 msleep(50);
4998 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4999 msleep(50);
5000 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5001 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5002#ifndef BCM_ISCSI
5003 /* set NIC mode */
5004 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5005#endif
5006
5007 /* Enable inputs of parser neighbor blocks */
5008 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5009 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5010 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5011 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5012
5013 DP(NETIF_MSG_HW, "done\n");
5014
5015 return 0; /* OK */
5016}
5017
5018static void enable_blocks_attention(struct bnx2x *bp)
5019{
5020 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5021 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5022 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5023 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5024 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5025 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5026 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5027 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5028 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5029/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5030/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5031 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5032 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5033 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5034/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5035/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5036 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5037 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5038 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5039 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5040/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5041/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5042 if (CHIP_REV_IS_FPGA(bp))
5043 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5044 else
5045 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5046 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5047 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5048 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5049/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5050/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5051 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5052 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5053/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5054 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5055}
5056
34f80b04
EG
5057
5058static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5059{
a2fbb9ea 5060 u32 val, i;
a2fbb9ea 5061
34f80b04 5062 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5063
34f80b04
EG
5064 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5065 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5066
34f80b04
EG
5067 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5068 if (CHIP_IS_E1H(bp))
5069 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5070
34f80b04
EG
5071 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5072 msleep(30);
5073 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5074
34f80b04
EG
5075 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5076 if (CHIP_IS_E1(bp)) {
5077 /* enable HW interrupt from PXP on USDM overflow
5078 bit 16 on INT_MASK_0 */
5079 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5080 }
a2fbb9ea 5081
34f80b04
EG
5082 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5083 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5084
5085#ifdef __BIG_ENDIAN
34f80b04
EG
5086 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5087 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5088 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5089 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5090 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5091 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5092
5093/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5094 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5095 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5096 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5097 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5098#endif
5099
5100#ifndef BCM_ISCSI
5101 /* set NIC mode */
5102 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5103#endif
5104
34f80b04 5105 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5106#ifdef BCM_ISCSI
34f80b04
EG
5107 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5108 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5109 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5110#endif
5111
34f80b04
EG
5112 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5113 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5114
34f80b04
EG
5115 /* let the HW do it's magic ... */
5116 msleep(100);
5117 /* finish PXP init */
5118 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5119 if (val != 1) {
5120 BNX2X_ERR("PXP2 CFG failed\n");
5121 return -EBUSY;
5122 }
5123 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5124 if (val != 1) {
5125 BNX2X_ERR("PXP2 RD_INIT failed\n");
5126 return -EBUSY;
5127 }
a2fbb9ea 5128
34f80b04
EG
5129 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5130 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5131
34f80b04 5132 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5133
34f80b04
EG
5134 /* clean the DMAE memory */
5135 bp->dmae_ready = 1;
5136 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5137
34f80b04
EG
5138 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5139 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5140 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5141 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5142
34f80b04
EG
5143 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5144 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5145 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5146 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5147
5148 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5149 /* soft reset pulse */
5150 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5151 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5152
5153#ifdef BCM_ISCSI
34f80b04 5154 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5155#endif
a2fbb9ea 5156
34f80b04
EG
5157 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5158 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5159 if (!CHIP_REV_IS_SLOW(bp)) {
5160 /* enable hw interrupt from doorbell Q */
5161 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5162 }
a2fbb9ea 5163
34f80b04
EG
5164 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5165 if (CHIP_REV_IS_SLOW(bp)) {
5166 /* fix for emulation and FPGA for no pause */
5167 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5168 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5169 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5170 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5171 }
a2fbb9ea 5172
34f80b04
EG
5173 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5174 if (CHIP_IS_E1H(bp))
5175 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5176
34f80b04
EG
5177 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5178 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5179 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5180 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5181
34f80b04
EG
5182 if (CHIP_IS_E1H(bp)) {
5183 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5184 STORM_INTMEM_SIZE_E1H/2);
5185 bnx2x_init_fill(bp,
5186 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5187 0, STORM_INTMEM_SIZE_E1H/2);
5188 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5189 STORM_INTMEM_SIZE_E1H/2);
5190 bnx2x_init_fill(bp,
5191 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5192 0, STORM_INTMEM_SIZE_E1H/2);
5193 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5194 STORM_INTMEM_SIZE_E1H/2);
5195 bnx2x_init_fill(bp,
5196 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5197 0, STORM_INTMEM_SIZE_E1H/2);
5198 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5199 STORM_INTMEM_SIZE_E1H/2);
5200 bnx2x_init_fill(bp,
5201 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5202 0, STORM_INTMEM_SIZE_E1H/2);
5203 } else { /* E1 */
ad8d3948
EG
5204 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5205 STORM_INTMEM_SIZE_E1);
5206 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5207 STORM_INTMEM_SIZE_E1);
5208 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5209 STORM_INTMEM_SIZE_E1);
5210 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5211 STORM_INTMEM_SIZE_E1);
34f80b04 5212 }
a2fbb9ea 5213
34f80b04
EG
5214 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5215 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5216 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5217 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5218
34f80b04
EG
5219 /* sync semi rtc */
5220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5221 0x80000000);
5222 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5223 0x80000000);
a2fbb9ea 5224
34f80b04
EG
5225 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5226 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5227 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5228
34f80b04
EG
5229 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5230 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5231 REG_WR(bp, i, 0xc0cac01a);
5232 /* TODO: replace with something meaningful */
5233 }
5234 if (CHIP_IS_E1H(bp))
5235 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5236 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5237
34f80b04
EG
5238 if (sizeof(union cdu_context) != 1024)
5239 /* we currently assume that a context is 1024 bytes */
5240 printk(KERN_ALERT PFX "please adjust the size of"
5241 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5242
34f80b04
EG
5243 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5244 val = (4 << 24) + (0 << 12) + 1024;
5245 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5246 if (CHIP_IS_E1(bp)) {
5247 /* !!! fix pxp client crdit until excel update */
5248 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5249 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5250 }
a2fbb9ea 5251
34f80b04
EG
5252 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5253 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5254
34f80b04
EG
5255 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5256 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5257
34f80b04
EG
5258 /* PXPCS COMMON comes here */
5259 /* Reset PCIE errors for debug */
5260 REG_WR(bp, 0x2814, 0xffffffff);
5261 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5262
34f80b04
EG
5263 /* EMAC0 COMMON comes here */
5264 /* EMAC1 COMMON comes here */
5265 /* DBU COMMON comes here */
5266 /* DBG COMMON comes here */
5267
5268 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5269 if (CHIP_IS_E1H(bp)) {
5270 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5271 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5272 }
5273
5274 if (CHIP_REV_IS_SLOW(bp))
5275 msleep(200);
5276
5277 /* finish CFC init */
5278 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5279 if (val != 1) {
5280 BNX2X_ERR("CFC LL_INIT failed\n");
5281 return -EBUSY;
5282 }
5283 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5284 if (val != 1) {
5285 BNX2X_ERR("CFC AC_INIT failed\n");
5286 return -EBUSY;
5287 }
5288 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5289 if (val != 1) {
5290 BNX2X_ERR("CFC CAM_INIT failed\n");
5291 return -EBUSY;
5292 }
5293 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5294
34f80b04
EG
5295 /* read NIG statistic
5296 to see if this is our first up since powerup */
5297 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5298 val = *bnx2x_sp(bp, wb_data[0]);
5299
5300 /* do internal memory self test */
5301 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5302 BNX2X_ERR("internal mem self test failed\n");
5303 return -EBUSY;
5304 }
5305
5306 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5307 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5308 /* Fan failure is indicated by SPIO 5 */
5309 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5310 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5311
5312 /* set to active low mode */
5313 val = REG_RD(bp, MISC_REG_SPIO_INT);
5314 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5315 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5316 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5317
34f80b04
EG
5318 /* enable interrupt to signal the IGU */
5319 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5320 val |= (1 << MISC_REGISTERS_SPIO_5);
5321 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5322 break;
f1410647 5323
34f80b04
EG
5324 default:
5325 break;
5326 }
f1410647 5327
34f80b04
EG
5328 /* clear PXP2 attentions */
5329 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5330
34f80b04 5331 enable_blocks_attention(bp);
a2fbb9ea 5332
7a9b2557
VZ
5333 if (bp->flags & TPA_ENABLE_FLAG) {
5334 struct tstorm_eth_tpa_exist tmp = {0};
5335
5336 tmp.tpa_exist = 1;
5337
5338 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5339 ((u32 *)&tmp)[0]);
5340 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5341 ((u32 *)&tmp)[1]);
5342 }
5343
34f80b04
EG
5344 return 0;
5345}
a2fbb9ea 5346
34f80b04
EG
5347static int bnx2x_init_port(struct bnx2x *bp)
5348{
5349 int port = BP_PORT(bp);
5350 u32 val;
a2fbb9ea 5351
34f80b04
EG
5352 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5353
5354 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5355
5356 /* Port PXP comes here */
5357 /* Port PXP2 comes here */
a2fbb9ea
ET
5358#ifdef BCM_ISCSI
5359 /* Port0 1
5360 * Port1 385 */
5361 i++;
5362 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5363 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5364 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5365 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5366
5367 /* Port0 2
5368 * Port1 386 */
5369 i++;
5370 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5371 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5372 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5373 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5374
5375 /* Port0 3
5376 * Port1 387 */
5377 i++;
5378 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5379 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5380 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5381 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5382#endif
34f80b04 5383 /* Port CMs come here */
a2fbb9ea
ET
5384
5385 /* Port QM comes here */
a2fbb9ea
ET
5386#ifdef BCM_ISCSI
5387 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5388 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5389
5390 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5391 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5392#endif
5393 /* Port DQ comes here */
5394 /* Port BRB1 comes here */
ad8d3948 5395 /* Port PRS comes here */
a2fbb9ea
ET
5396 /* Port TSDM comes here */
5397 /* Port CSDM comes here */
5398 /* Port USDM comes here */
5399 /* Port XSDM comes here */
34f80b04
EG
5400 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5401 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5402 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5403 port ? USEM_PORT1_END : USEM_PORT0_END);
5404 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5405 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5406 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5407 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5408 /* Port UPB comes here */
34f80b04
EG
5409 /* Port XPB comes here */
5410
5411 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5412 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5413
5414 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5415 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5416
5417 /* update threshold */
34f80b04 5418 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5419 /* update init credit */
34f80b04 5420 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5421
5422 /* probe changes */
34f80b04 5423 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5424 msleep(5);
34f80b04 5425 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5426
5427#ifdef BCM_ISCSI
5428 /* tell the searcher where the T2 table is */
5429 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5430
5431 wb_write[0] = U64_LO(bp->t2_mapping);
5432 wb_write[1] = U64_HI(bp->t2_mapping);
5433 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5434 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5435 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5436 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5437
5438 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5439 /* Port SRCH comes here */
5440#endif
5441 /* Port CDU comes here */
5442 /* Port CFC comes here */
34f80b04
EG
5443
5444 if (CHIP_IS_E1(bp)) {
5445 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5446 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5447 }
5448 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5449 port ? HC_PORT1_END : HC_PORT0_END);
5450
5451 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5452 MISC_AEU_PORT0_START,
34f80b04
EG
5453 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5454 /* init aeu_mask_attn_func_0/1:
5455 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5456 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5457 * bits 4-7 are used for "per vn group attention" */
5458 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5459 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5460
a2fbb9ea
ET
5461 /* Port PXPCS comes here */
5462 /* Port EMAC0 comes here */
5463 /* Port EMAC1 comes here */
5464 /* Port DBU comes here */
5465 /* Port DBG comes here */
34f80b04
EG
5466 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5467 port ? NIG_PORT1_END : NIG_PORT0_END);
5468
5469 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5470
5471 if (CHIP_IS_E1H(bp)) {
5472 u32 wsum;
5473 struct cmng_struct_per_port m_cmng_port;
5474 int vn;
5475
5476 /* 0x2 disable e1hov, 0x1 enable */
5477 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5478 (IS_E1HMF(bp) ? 0x1 : 0x2));
5479
5480 /* Init RATE SHAPING and FAIRNESS contexts.
5481 Initialize as if there is 10G link. */
5482 wsum = bnx2x_calc_vn_wsum(bp);
5483 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5484 if (IS_E1HMF(bp))
5485 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5486 bnx2x_init_vn_minmax(bp, 2*vn + port,
5487 wsum, 10000, &m_cmng_port);
5488 }
5489
a2fbb9ea
ET
5490 /* Port MCP comes here */
5491 /* Port DMAE comes here */
5492
34f80b04 5493 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
f1410647
ET
5494 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5495 /* add SPIO 5 to group 0 */
5496 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5497 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5498 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5499 break;
5500
5501 default:
5502 break;
5503 }
5504
c18487ee 5505 bnx2x__link_reset(bp);
a2fbb9ea 5506
34f80b04
EG
5507 return 0;
5508}
5509
5510#define ILT_PER_FUNC (768/2)
5511#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5512/* the phys address is shifted right 12 bits and has an added
5513 1=valid bit added to the 53rd bit
5514 then since this is a wide register(TM)
5515 we split it into two 32 bit writes
5516 */
5517#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5518#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5519#define PXP_ONE_ILT(x) (((x) << 10) | x)
5520#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5521
5522#define CNIC_ILT_LINES 0
5523
5524static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5525{
5526 int reg;
5527
5528 if (CHIP_IS_E1H(bp))
5529 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5530 else /* E1 */
5531 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5532
5533 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5534}
5535
5536static int bnx2x_init_func(struct bnx2x *bp)
5537{
5538 int port = BP_PORT(bp);
5539 int func = BP_FUNC(bp);
5540 int i;
5541
5542 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5543
5544 i = FUNC_ILT_BASE(func);
5545
5546 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5547 if (CHIP_IS_E1H(bp)) {
5548 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5549 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5550 } else /* E1 */
5551 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5552 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5553
5554
5555 if (CHIP_IS_E1H(bp)) {
5556 for (i = 0; i < 9; i++)
5557 bnx2x_init_block(bp,
5558 cm_start[func][i], cm_end[func][i]);
5559
5560 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5561 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5562 }
5563
5564 /* HC init per function */
5565 if (CHIP_IS_E1H(bp)) {
5566 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5567
5568 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5569 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5570 }
5571 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5572
5573 if (CHIP_IS_E1H(bp))
5574 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5575
c14423fe 5576 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5577 REG_WR(bp, 0x2114, 0xffffffff);
5578 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5579
34f80b04
EG
5580 return 0;
5581}
5582
5583static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5584{
5585 int i, rc = 0;
a2fbb9ea 5586
34f80b04
EG
5587 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5588 BP_FUNC(bp), load_code);
a2fbb9ea 5589
34f80b04
EG
5590 bp->dmae_ready = 0;
5591 mutex_init(&bp->dmae_mutex);
5592 bnx2x_gunzip_init(bp);
a2fbb9ea 5593
34f80b04
EG
5594 switch (load_code) {
5595 case FW_MSG_CODE_DRV_LOAD_COMMON:
5596 rc = bnx2x_init_common(bp);
5597 if (rc)
5598 goto init_hw_err;
5599 /* no break */
5600
5601 case FW_MSG_CODE_DRV_LOAD_PORT:
5602 bp->dmae_ready = 1;
5603 rc = bnx2x_init_port(bp);
5604 if (rc)
5605 goto init_hw_err;
5606 /* no break */
5607
5608 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5609 bp->dmae_ready = 1;
5610 rc = bnx2x_init_func(bp);
5611 if (rc)
5612 goto init_hw_err;
5613 break;
5614
5615 default:
5616 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5617 break;
5618 }
5619
5620 if (!BP_NOMCP(bp)) {
5621 int func = BP_FUNC(bp);
a2fbb9ea
ET
5622
5623 bp->fw_drv_pulse_wr_seq =
34f80b04 5624 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5625 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5626 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5627 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5628 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5629 } else
5630 bp->func_stx = 0;
a2fbb9ea 5631
34f80b04
EG
5632 /* this needs to be done before gunzip end */
5633 bnx2x_zero_def_sb(bp);
5634 for_each_queue(bp, i)
5635 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5636
5637init_hw_err:
5638 bnx2x_gunzip_end(bp);
5639
5640 return rc;
a2fbb9ea
ET
5641}
5642
c14423fe 5643/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5644static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5645{
34f80b04 5646 int func = BP_FUNC(bp);
f1410647
ET
5647 u32 seq = ++bp->fw_seq;
5648 u32 rc = 0;
19680c48
EG
5649 u32 cnt = 1;
5650 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5651
34f80b04 5652 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5653 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5654
19680c48
EG
5655 do {
5656 /* let the FW do it's magic ... */
5657 msleep(delay);
a2fbb9ea 5658
19680c48 5659 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5660
19680c48
EG
5661 /* Give the FW up to 2 second (200*10ms) */
5662 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5663
5664 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5665 cnt*delay, rc, seq);
a2fbb9ea
ET
5666
5667 /* is this a reply to our command? */
5668 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5669 rc &= FW_MSG_CODE_MASK;
f1410647 5670
a2fbb9ea
ET
5671 } else {
5672 /* FW BUG! */
5673 BNX2X_ERR("FW failed to respond!\n");
5674 bnx2x_fw_dump(bp);
5675 rc = 0;
5676 }
f1410647 5677
a2fbb9ea
ET
5678 return rc;
5679}
5680
5681static void bnx2x_free_mem(struct bnx2x *bp)
5682{
5683
5684#define BNX2X_PCI_FREE(x, y, size) \
5685 do { \
5686 if (x) { \
5687 pci_free_consistent(bp->pdev, size, x, y); \
5688 x = NULL; \
5689 y = 0; \
5690 } \
5691 } while (0)
5692
5693#define BNX2X_FREE(x) \
5694 do { \
5695 if (x) { \
5696 vfree(x); \
5697 x = NULL; \
5698 } \
5699 } while (0)
5700
5701 int i;
5702
5703 /* fastpath */
5704 for_each_queue(bp, i) {
5705
5706 /* Status blocks */
5707 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5708 bnx2x_fp(bp, i, status_blk_mapping),
5709 sizeof(struct host_status_block) +
5710 sizeof(struct eth_tx_db_data));
5711
5712 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5713 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5714 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5715 bnx2x_fp(bp, i, tx_desc_mapping),
5716 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5717
5718 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5719 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5720 bnx2x_fp(bp, i, rx_desc_mapping),
5721 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5722
5723 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5724 bnx2x_fp(bp, i, rx_comp_mapping),
5725 sizeof(struct eth_fast_path_rx_cqe) *
5726 NUM_RCQ_BD);
a2fbb9ea 5727
7a9b2557
VZ
5728 /* SGE ring */
5729 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5730 bnx2x_fp(bp, i, rx_sge_mapping),
5731 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5732 }
a2fbb9ea
ET
5733 /* end of fastpath */
5734
5735 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5736 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5737
5738 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5739 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5740
5741#ifdef BCM_ISCSI
5742 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5743 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5744 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5745 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5746#endif
7a9b2557 5747 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5748
5749#undef BNX2X_PCI_FREE
5750#undef BNX2X_KFREE
5751}
5752
5753static int bnx2x_alloc_mem(struct bnx2x *bp)
5754{
5755
5756#define BNX2X_PCI_ALLOC(x, y, size) \
5757 do { \
5758 x = pci_alloc_consistent(bp->pdev, size, y); \
5759 if (x == NULL) \
5760 goto alloc_mem_err; \
5761 memset(x, 0, size); \
5762 } while (0)
5763
5764#define BNX2X_ALLOC(x, size) \
5765 do { \
5766 x = vmalloc(size); \
5767 if (x == NULL) \
5768 goto alloc_mem_err; \
5769 memset(x, 0, size); \
5770 } while (0)
5771
5772 int i;
5773
5774 /* fastpath */
a2fbb9ea
ET
5775 for_each_queue(bp, i) {
5776 bnx2x_fp(bp, i, bp) = bp;
5777
5778 /* Status blocks */
5779 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5780 &bnx2x_fp(bp, i, status_blk_mapping),
5781 sizeof(struct host_status_block) +
5782 sizeof(struct eth_tx_db_data));
5783
5784 bnx2x_fp(bp, i, hw_tx_prods) =
5785 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5786
5787 bnx2x_fp(bp, i, tx_prods_mapping) =
5788 bnx2x_fp(bp, i, status_blk_mapping) +
5789 sizeof(struct host_status_block);
5790
5791 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5792 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5793 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5794 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5795 &bnx2x_fp(bp, i, tx_desc_mapping),
5796 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5797
5798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5799 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5801 &bnx2x_fp(bp, i, rx_desc_mapping),
5802 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5803
5804 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5805 &bnx2x_fp(bp, i, rx_comp_mapping),
5806 sizeof(struct eth_fast_path_rx_cqe) *
5807 NUM_RCQ_BD);
5808
7a9b2557
VZ
5809 /* SGE ring */
5810 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5811 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5812 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5813 &bnx2x_fp(bp, i, rx_sge_mapping),
5814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea
ET
5815 }
5816 /* end of fastpath */
5817
5818 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5819 sizeof(struct host_def_status_block));
5820
5821 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5822 sizeof(struct bnx2x_slowpath));
5823
5824#ifdef BCM_ISCSI
5825 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5826
5827 /* Initialize T1 */
5828 for (i = 0; i < 64*1024; i += 64) {
5829 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5830 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5831 }
5832
5833 /* allocate searcher T2 table
5834 we allocate 1/4 of alloc num for T2
5835 (which is not entered into the ILT) */
5836 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5837
5838 /* Initialize T2 */
5839 for (i = 0; i < 16*1024; i += 64)
5840 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5841
c14423fe 5842 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5843 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5844
5845 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5846 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5847
5848 /* QM queues (128*MAX_CONN) */
5849 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5850#endif
5851
5852 /* Slow path ring */
5853 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5854
5855 return 0;
5856
5857alloc_mem_err:
5858 bnx2x_free_mem(bp);
5859 return -ENOMEM;
5860
5861#undef BNX2X_PCI_ALLOC
5862#undef BNX2X_ALLOC
5863}
5864
5865static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5866{
5867 int i;
5868
5869 for_each_queue(bp, i) {
5870 struct bnx2x_fastpath *fp = &bp->fp[i];
5871
5872 u16 bd_cons = fp->tx_bd_cons;
5873 u16 sw_prod = fp->tx_pkt_prod;
5874 u16 sw_cons = fp->tx_pkt_cons;
5875
a2fbb9ea
ET
5876 while (sw_cons != sw_prod) {
5877 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5878 sw_cons++;
5879 }
5880 }
5881}
5882
5883static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5884{
5885 int i, j;
5886
5887 for_each_queue(bp, j) {
5888 struct bnx2x_fastpath *fp = &bp->fp[j];
5889
a2fbb9ea
ET
5890 for (i = 0; i < NUM_RX_BD; i++) {
5891 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5892 struct sk_buff *skb = rx_buf->skb;
5893
5894 if (skb == NULL)
5895 continue;
5896
5897 pci_unmap_single(bp->pdev,
5898 pci_unmap_addr(rx_buf, mapping),
5899 bp->rx_buf_use_size,
5900 PCI_DMA_FROMDEVICE);
5901
5902 rx_buf->skb = NULL;
5903 dev_kfree_skb(skb);
5904 }
7a9b2557
VZ
5905 if (!fp->disable_tpa)
5906 bnx2x_free_tpa_pool(bp, fp,
5907 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
5908 }
5909}
5910
5911static void bnx2x_free_skbs(struct bnx2x *bp)
5912{
5913 bnx2x_free_tx_skbs(bp);
5914 bnx2x_free_rx_skbs(bp);
5915}
5916
5917static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5918{
34f80b04 5919 int i, offset = 1;
a2fbb9ea
ET
5920
5921 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5922 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
5923 bp->msix_table[0].vector);
5924
5925 for_each_queue(bp, i) {
c14423fe 5926 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5927 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5928 bnx2x_fp(bp, i, state));
5929
228241eb
ET
5930 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5931 BNX2X_ERR("IRQ of fp #%d being freed while "
5932 "state != closed\n", i);
a2fbb9ea 5933
34f80b04 5934 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5935 }
a2fbb9ea
ET
5936}
5937
5938static void bnx2x_free_irq(struct bnx2x *bp)
5939{
a2fbb9ea 5940 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
5941 bnx2x_free_msix_irqs(bp);
5942 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
5943 bp->flags &= ~USING_MSIX_FLAG;
5944
5945 } else
5946 free_irq(bp->pdev->irq, bp->dev);
5947}
5948
5949static int bnx2x_enable_msix(struct bnx2x *bp)
5950{
34f80b04 5951 int i, rc, offset;
a2fbb9ea
ET
5952
5953 bp->msix_table[0].entry = 0;
34f80b04
EG
5954 offset = 1;
5955 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5956
34f80b04
EG
5957 for_each_queue(bp, i) {
5958 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5959
34f80b04
EG
5960 bp->msix_table[i + offset].entry = igu_vec;
5961 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5962 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
5963 }
5964
34f80b04
EG
5965 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5966 bp->num_queues + offset);
5967 if (rc) {
5968 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
5969 return -1;
5970 }
a2fbb9ea
ET
5971 bp->flags |= USING_MSIX_FLAG;
5972
5973 return 0;
a2fbb9ea
ET
5974}
5975
a2fbb9ea
ET
5976static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5977{
34f80b04 5978 int i, rc, offset = 1;
a2fbb9ea 5979
a2fbb9ea
ET
5980 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
5981 bp->dev->name, bp->dev);
a2fbb9ea
ET
5982 if (rc) {
5983 BNX2X_ERR("request sp irq failed\n");
5984 return -EBUSY;
5985 }
5986
5987 for_each_queue(bp, i) {
34f80b04 5988 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5989 bnx2x_msix_fp_int, 0,
5990 bp->dev->name, &bp->fp[i]);
a2fbb9ea 5991 if (rc) {
34f80b04
EG
5992 BNX2X_ERR("request fp #%d irq failed rc %d\n",
5993 i + offset, rc);
a2fbb9ea
ET
5994 bnx2x_free_msix_irqs(bp);
5995 return -EBUSY;
5996 }
5997
5998 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
5999 }
6000
6001 return 0;
a2fbb9ea
ET
6002}
6003
6004static int bnx2x_req_irq(struct bnx2x *bp)
6005{
34f80b04 6006 int rc;
a2fbb9ea 6007
34f80b04
EG
6008 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6009 bp->dev->name, bp->dev);
a2fbb9ea
ET
6010 if (!rc)
6011 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6012
6013 return rc;
a2fbb9ea
ET
6014}
6015
6016/*
6017 * Init service functions
6018 */
6019
34f80b04 6020static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
a2fbb9ea
ET
6021{
6022 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6023 int port = BP_PORT(bp);
a2fbb9ea
ET
6024
6025 /* CAM allocation
6026 * unicasts 0-31:port0 32-63:port1
6027 * multicast 64-127:port0 128-191:port1
6028 */
6029 config->hdr.length_6b = 2;
34f80b04
EG
6030 config->hdr.offset = port ? 31 : 0;
6031 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6032 config->hdr.reserved1 = 0;
6033
6034 /* primary MAC */
6035 config->config_table[0].cam_entry.msb_mac_addr =
6036 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6037 config->config_table[0].cam_entry.middle_mac_addr =
6038 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6039 config->config_table[0].cam_entry.lsb_mac_addr =
6040 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6041 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
6042 config->config_table[0].target_table_entry.flags = 0;
6043 config->config_table[0].target_table_entry.client_id = 0;
6044 config->config_table[0].target_table_entry.vlan_id = 0;
6045
6046 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6047 config->config_table[0].cam_entry.msb_mac_addr,
6048 config->config_table[0].cam_entry.middle_mac_addr,
6049 config->config_table[0].cam_entry.lsb_mac_addr);
6050
6051 /* broadcast */
6052 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6053 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6054 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6055 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
6056 config->config_table[1].target_table_entry.flags =
6057 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6058 config->config_table[1].target_table_entry.client_id = 0;
6059 config->config_table[1].target_table_entry.vlan_id = 0;
6060
6061 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6062 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6063 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6064}
6065
34f80b04
EG
6066static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6067{
6068 struct mac_configuration_cmd_e1h *config =
6069 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6070
6071 if (bp->state != BNX2X_STATE_OPEN) {
6072 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6073 return;
6074 }
6075
6076 /* CAM allocation for E1H
6077 * unicasts: by func number
6078 * multicast: 20+FUNC*20, 20 each
6079 */
6080 config->hdr.length_6b = 1;
6081 config->hdr.offset = BP_FUNC(bp);
6082 config->hdr.client_id = BP_CL_ID(bp);
6083 config->hdr.reserved1 = 0;
6084
6085 /* primary MAC */
6086 config->config_table[0].msb_mac_addr =
6087 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6088 config->config_table[0].middle_mac_addr =
6089 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6090 config->config_table[0].lsb_mac_addr =
6091 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6092 config->config_table[0].client_id = BP_L_ID(bp);
6093 config->config_table[0].vlan_id = 0;
6094 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6095 config->config_table[0].flags = BP_PORT(bp);
6096
6097 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6098 config->config_table[0].msb_mac_addr,
6099 config->config_table[0].middle_mac_addr,
6100 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6101
6102 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6103 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6104 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6105}
6106
a2fbb9ea
ET
6107static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6108 int *state_p, int poll)
6109{
6110 /* can take a while if any port is running */
34f80b04 6111 int cnt = 500;
a2fbb9ea 6112
c14423fe
ET
6113 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6114 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6115
6116 might_sleep();
34f80b04 6117 while (cnt--) {
a2fbb9ea
ET
6118 if (poll) {
6119 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6120 /* if index is different from 0
6121 * the reply for some commands will
a2fbb9ea
ET
6122 * be on the none default queue
6123 */
6124 if (idx)
6125 bnx2x_rx_int(&bp->fp[idx], 10);
6126 }
34f80b04 6127 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 6128
49d66772 6129 if (*state_p == state)
a2fbb9ea
ET
6130 return 0;
6131
a2fbb9ea 6132 msleep(1);
a2fbb9ea
ET
6133 }
6134
a2fbb9ea 6135 /* timeout! */
49d66772
ET
6136 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6137 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6138#ifdef BNX2X_STOP_ON_ERROR
6139 bnx2x_panic();
6140#endif
a2fbb9ea 6141
49d66772 6142 return -EBUSY;
a2fbb9ea
ET
6143}
6144
6145static int bnx2x_setup_leading(struct bnx2x *bp)
6146{
34f80b04 6147 int rc;
a2fbb9ea 6148
c14423fe 6149 /* reset IGU state */
34f80b04 6150 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6151
6152 /* SETUP ramrod */
6153 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6154
34f80b04
EG
6155 /* Wait for completion */
6156 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6157
34f80b04 6158 return rc;
a2fbb9ea
ET
6159}
6160
6161static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6162{
a2fbb9ea 6163 /* reset IGU state */
34f80b04 6164 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6165
228241eb 6166 /* SETUP ramrod */
a2fbb9ea
ET
6167 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6168 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6169
6170 /* Wait for completion */
6171 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6172 &(bp->fp[index].state), 0);
a2fbb9ea
ET
6173}
6174
a2fbb9ea
ET
6175static int bnx2x_poll(struct napi_struct *napi, int budget);
6176static void bnx2x_set_rx_mode(struct net_device *dev);
6177
34f80b04
EG
6178/* must be called with rtnl_lock */
6179static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6180{
228241eb 6181 u32 load_code;
34f80b04
EG
6182 int i, rc;
6183
6184#ifdef BNX2X_STOP_ON_ERROR
6185 if (unlikely(bp->panic))
6186 return -EPERM;
6187#endif
a2fbb9ea
ET
6188
6189 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6190
34f80b04
EG
6191 /* Send LOAD_REQUEST command to MCP
6192 Returns the type of LOAD command:
6193 if it is the first port to be initialized
6194 common blocks should be initialized, otherwise - not
a2fbb9ea 6195 */
34f80b04 6196 if (!BP_NOMCP(bp)) {
228241eb
ET
6197 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6198 if (!load_code) {
6199 BNX2X_ERR("MCP response failure, unloading\n");
6200 return -EBUSY;
6201 }
34f80b04 6202 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6203 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6204
a2fbb9ea 6205 } else {
34f80b04
EG
6206 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6207 load_count[0], load_count[1], load_count[2]);
6208 load_count[0]++;
6209 load_count[1 + BP_PORT(bp)]++;
6210 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6211 load_count[0], load_count[1], load_count[2]);
6212 if (load_count[0] == 1)
6213 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6214 else if (load_count[1 + BP_PORT(bp)] == 1)
6215 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6216 else
6217 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
6218 }
6219
34f80b04
EG
6220 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6221 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6222 bp->port.pmf = 1;
6223 else
6224 bp->port.pmf = 0;
6225 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6226
6227 /* if we can't use MSI-X we only need one fp,
6228 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
6229 * and fallback to inta with one fp
6230 */
34f80b04
EG
6231 if (use_inta) {
6232 bp->num_queues = 1;
6233
6234 } else {
6235 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6236 /* user requested number */
6237 bp->num_queues = use_multi;
6238
6239 else if (use_multi)
6240 bp->num_queues = min_t(u32, num_online_cpus(),
6241 BP_MAX_QUEUES(bp));
6242 else
a2fbb9ea 6243 bp->num_queues = 1;
34f80b04
EG
6244
6245 if (bnx2x_enable_msix(bp)) {
6246 /* failed to enable MSI-X */
6247 bp->num_queues = 1;
6248 if (use_multi)
6249 BNX2X_ERR("Multi requested but failed"
6250 " to enable MSI-X\n");
a2fbb9ea
ET
6251 }
6252 }
34f80b04
EG
6253 DP(NETIF_MSG_IFUP,
6254 "set number of queues to %d\n", bp->num_queues);
c14423fe 6255
a2fbb9ea
ET
6256 if (bnx2x_alloc_mem(bp))
6257 return -ENOMEM;
6258
7a9b2557
VZ
6259 for_each_queue(bp, i)
6260 bnx2x_fp(bp, i, disable_tpa) =
6261 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6262
34f80b04
EG
6263 /* Disable interrupt handling until HW is initialized */
6264 atomic_set(&bp->intr_sem, 1);
a2fbb9ea 6265
34f80b04
EG
6266 if (bp->flags & USING_MSIX_FLAG) {
6267 rc = bnx2x_req_msix_irqs(bp);
6268 if (rc) {
6269 pci_disable_msix(bp->pdev);
6270 goto load_error;
6271 }
6272 } else {
6273 bnx2x_ack_int(bp);
6274 rc = bnx2x_req_irq(bp);
6275 if (rc) {
6276 BNX2X_ERR("IRQ request failed, aborting\n");
6277 goto load_error;
a2fbb9ea
ET
6278 }
6279 }
6280
6281 for_each_queue(bp, i)
6282 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6283 bnx2x_poll, 128);
6284
a2fbb9ea 6285 /* Initialize HW */
34f80b04
EG
6286 rc = bnx2x_init_hw(bp, load_code);
6287 if (rc) {
a2fbb9ea 6288 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6289 goto load_error;
a2fbb9ea
ET
6290 }
6291
34f80b04 6292 /* Enable interrupt handling */
a2fbb9ea
ET
6293 atomic_set(&bp->intr_sem, 0);
6294
a2fbb9ea
ET
6295 /* Setup NIC internals and enable interrupts */
6296 bnx2x_nic_init(bp);
6297
6298 /* Send LOAD_DONE command to MCP */
34f80b04 6299 if (!BP_NOMCP(bp)) {
228241eb
ET
6300 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6301 if (!load_code) {
a2fbb9ea 6302 BNX2X_ERR("MCP response failure, unloading\n");
34f80b04 6303 rc = -EBUSY;
228241eb 6304 goto load_int_disable;
a2fbb9ea
ET
6305 }
6306 }
6307
bb2a0f7a
YG
6308 bnx2x_stats_init(bp);
6309
a2fbb9ea
ET
6310 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6311
6312 /* Enable Rx interrupt handling before sending the ramrod
6313 as it's completed on Rx FP queue */
6314 for_each_queue(bp, i)
6315 napi_enable(&bnx2x_fp(bp, i, napi));
6316
34f80b04
EG
6317 rc = bnx2x_setup_leading(bp);
6318 if (rc) {
6319#ifdef BNX2X_STOP_ON_ERROR
6320 bp->panic = 1;
6321#endif
228241eb 6322 goto load_stop_netif;
34f80b04 6323 }
a2fbb9ea 6324
34f80b04
EG
6325 if (CHIP_IS_E1H(bp))
6326 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6327 BNX2X_ERR("!!! mf_cfg function disabled\n");
6328 bp->state = BNX2X_STATE_DISABLED;
6329 }
a2fbb9ea 6330
34f80b04
EG
6331 if (bp->state == BNX2X_STATE_OPEN)
6332 for_each_nondefault_queue(bp, i) {
6333 rc = bnx2x_setup_multi(bp, i);
6334 if (rc)
6335 goto load_stop_netif;
6336 }
a2fbb9ea 6337
34f80b04
EG
6338 if (CHIP_IS_E1(bp))
6339 bnx2x_set_mac_addr_e1(bp);
6340 else
6341 bnx2x_set_mac_addr_e1h(bp);
6342
6343 if (bp->port.pmf)
6344 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6345
6346 /* Start fast path */
34f80b04
EG
6347 switch (load_mode) {
6348 case LOAD_NORMAL:
6349 /* Tx queue should be only reenabled */
6350 netif_wake_queue(bp->dev);
6351 bnx2x_set_rx_mode(bp->dev);
6352 break;
6353
6354 case LOAD_OPEN:
6355 /* IRQ is only requested from bnx2x_open */
a2fbb9ea 6356 netif_start_queue(bp->dev);
34f80b04 6357 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
6358 if (bp->flags & USING_MSIX_FLAG)
6359 printk(KERN_INFO PFX "%s: using MSI-X\n",
6360 bp->dev->name);
34f80b04 6361 break;
a2fbb9ea 6362
34f80b04 6363 case LOAD_DIAG:
a2fbb9ea 6364 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6365 bp->state = BNX2X_STATE_DIAG;
6366 break;
6367
6368 default:
6369 break;
a2fbb9ea
ET
6370 }
6371
34f80b04
EG
6372 if (!bp->port.pmf)
6373 bnx2x__link_status_update(bp);
6374
a2fbb9ea
ET
6375 /* start the timer */
6376 mod_timer(&bp->timer, jiffies + bp->current_interval);
6377
34f80b04 6378
a2fbb9ea
ET
6379 return 0;
6380
228241eb 6381load_stop_netif:
a2fbb9ea
ET
6382 for_each_queue(bp, i)
6383 napi_disable(&bnx2x_fp(bp, i, napi));
6384
228241eb 6385load_int_disable:
615f8fd9 6386 bnx2x_int_disable_sync(bp);
a2fbb9ea 6387
34f80b04 6388 /* Release IRQs */
a2fbb9ea
ET
6389 bnx2x_free_irq(bp);
6390
7a9b2557
VZ
6391 /* Free SKBs, SGEs, TPA pool and driver internals */
6392 bnx2x_free_skbs(bp);
6393 for_each_queue(bp, i)
6394 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6395 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6396load_error:
a2fbb9ea
ET
6397 bnx2x_free_mem(bp);
6398
6399 /* TBD we really need to reset the chip
6400 if we want to recover from this */
34f80b04 6401 return rc;
a2fbb9ea
ET
6402}
6403
6404static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6405{
a2fbb9ea
ET
6406 int rc;
6407
c14423fe 6408 /* halt the connection */
a2fbb9ea
ET
6409 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6410 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6411
34f80b04 6412 /* Wait for completion */
a2fbb9ea 6413 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6414 &(bp->fp[index].state), 1);
c14423fe 6415 if (rc) /* timeout */
a2fbb9ea
ET
6416 return rc;
6417
6418 /* delete cfc entry */
6419 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6420
34f80b04
EG
6421 /* Wait for completion */
6422 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6423 &(bp->fp[index].state), 1);
6424 return rc;
a2fbb9ea
ET
6425}
6426
a2fbb9ea
ET
6427static void bnx2x_stop_leading(struct bnx2x *bp)
6428{
49d66772 6429 u16 dsb_sp_prod_idx;
c14423fe 6430 /* if the other port is handling traffic,
a2fbb9ea 6431 this can take a lot of time */
34f80b04
EG
6432 int cnt = 500;
6433 int rc;
a2fbb9ea
ET
6434
6435 might_sleep();
6436
6437 /* Send HALT ramrod */
6438 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6439 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6440
34f80b04
EG
6441 /* Wait for completion */
6442 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6443 &(bp->fp[0].state), 1);
6444 if (rc) /* timeout */
a2fbb9ea
ET
6445 return;
6446
49d66772 6447 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6448
228241eb 6449 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6450 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6451
49d66772 6452 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6453 we are going to reset the chip anyway
6454 so there is not much to do if this times out
6455 */
34f80b04 6456 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 6457 msleep(1);
34f80b04
EG
6458 if (!cnt) {
6459 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6460 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6461 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6462#ifdef BNX2X_STOP_ON_ERROR
6463 bnx2x_panic();
6464#endif
6465 break;
6466 }
6467 cnt--;
49d66772
ET
6468 }
6469 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6470 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
6471}
6472
34f80b04
EG
6473static void bnx2x_reset_func(struct bnx2x *bp)
6474{
6475 int port = BP_PORT(bp);
6476 int func = BP_FUNC(bp);
6477 int base, i;
6478
6479 /* Configure IGU */
6480 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6481 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6482
6483 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6484
6485 /* Clear ILT */
6486 base = FUNC_ILT_BASE(func);
6487 for (i = base; i < base + ILT_PER_FUNC; i++)
6488 bnx2x_ilt_wr(bp, i, 0);
6489}
6490
6491static void bnx2x_reset_port(struct bnx2x *bp)
6492{
6493 int port = BP_PORT(bp);
6494 u32 val;
6495
6496 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6497
6498 /* Do not rcv packets to BRB */
6499 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6500 /* Do not direct rcv packets that are not for MCP to the BRB */
6501 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6502 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6503
6504 /* Configure AEU */
6505 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6506
6507 msleep(100);
6508 /* Check for BRB port occupancy */
6509 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6510 if (val)
6511 DP(NETIF_MSG_IFDOWN,
6512 "BRB1 is not empty %d blooks are occupied\n", val);
6513
6514 /* TODO: Close Doorbell port? */
6515}
6516
6517static void bnx2x_reset_common(struct bnx2x *bp)
6518{
6519 /* reset_common */
6520 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6521 0xd3ffff7f);
6522 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6523}
6524
6525static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6526{
6527 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6528 BP_FUNC(bp), reset_code);
6529
6530 switch (reset_code) {
6531 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6532 bnx2x_reset_port(bp);
6533 bnx2x_reset_func(bp);
6534 bnx2x_reset_common(bp);
6535 break;
6536
6537 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6538 bnx2x_reset_port(bp);
6539 bnx2x_reset_func(bp);
6540 break;
6541
6542 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6543 bnx2x_reset_func(bp);
6544 break;
49d66772 6545
34f80b04
EG
6546 default:
6547 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6548 break;
6549 }
6550}
6551
6552/* msut be called with rtnl_lock */
6553static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea
ET
6554{
6555 u32 reset_code = 0;
34f80b04 6556 int i, cnt;
a2fbb9ea
ET
6557
6558 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6559
228241eb
ET
6560 bp->rx_mode = BNX2X_RX_MODE_NONE;
6561 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6562
228241eb
ET
6563 if (netif_running(bp->dev)) {
6564 netif_tx_disable(bp->dev);
6565 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6566 }
6567
34f80b04
EG
6568 del_timer_sync(&bp->timer);
6569 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6570 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6571 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6572
228241eb
ET
6573 /* Wait until all fast path tasks complete */
6574 for_each_queue(bp, i) {
6575 struct bnx2x_fastpath *fp = &bp->fp[i];
6576
34f80b04
EG
6577#ifdef BNX2X_STOP_ON_ERROR
6578#ifdef __powerpc64__
6579 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6580#else
6581 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6582#endif
6583 fp->tpa_queue_used);
6584#endif
6585 cnt = 1000;
6586 smp_rmb();
6587 while (bnx2x_has_work(fp)) {
228241eb 6588 msleep(1);
34f80b04
EG
6589 if (!cnt) {
6590 BNX2X_ERR("timeout waiting for queue[%d]\n",
6591 i);
6592#ifdef BNX2X_STOP_ON_ERROR
6593 bnx2x_panic();
6594 return -EBUSY;
6595#else
6596 break;
6597#endif
6598 }
6599 cnt--;
6600 smp_rmb();
6601 }
228241eb 6602 }
a2fbb9ea 6603
34f80b04
EG
6604 /* Wait until all slow path tasks complete */
6605 cnt = 1000;
6606 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
a2fbb9ea
ET
6607 msleep(1);
6608
228241eb
ET
6609 for_each_queue(bp, i)
6610 napi_disable(&bnx2x_fp(bp, i, napi));
6611 /* Disable interrupts after Tx and Rx are disabled on stack level */
6612 bnx2x_int_disable_sync(bp);
a2fbb9ea 6613
34f80b04
EG
6614 /* Release IRQs */
6615 bnx2x_free_irq(bp);
6616
a2fbb9ea
ET
6617 if (bp->flags & NO_WOL_FLAG)
6618 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
228241eb 6619
a2fbb9ea 6620 else if (bp->wol) {
34f80b04 6621 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6622 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6623 u32 val;
a2fbb9ea 6624
34f80b04
EG
6625 /* The mac address is written to entries 1-4 to
6626 preserve entry 0 which is used by the PMF */
a2fbb9ea 6627 val = (mac_addr[0] << 8) | mac_addr[1];
34f80b04 6628 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
a2fbb9ea
ET
6629
6630 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6631 (mac_addr[4] << 8) | mac_addr[5];
34f80b04
EG
6632 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6633 val);
a2fbb9ea
ET
6634
6635 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6636
a2fbb9ea
ET
6637 } else
6638 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6639
34f80b04
EG
6640 /* Close multi and leading connections
6641 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6642 for_each_nondefault_queue(bp, i)
6643 if (bnx2x_stop_multi(bp, i))
228241eb 6644 goto unload_error;
a2fbb9ea 6645
34f80b04
EG
6646 if (CHIP_IS_E1H(bp))
6647 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6648
6649 bnx2x_stop_leading(bp);
6650#ifdef BNX2X_STOP_ON_ERROR
6651 /* If ramrod completion timed out - break here! */
6652 if (bp->panic) {
6653 BNX2X_ERR("Stop leading failed!\n");
6654 return -EBUSY;
6655 }
6656#endif
6657
228241eb
ET
6658 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6659 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
34f80b04
EG
6660 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6661 "state 0x%x fp[0].state 0x%x\n",
228241eb
ET
6662 bp->state, bp->fp[0].state);
6663 }
6664
6665unload_error:
34f80b04 6666 if (!BP_NOMCP(bp))
228241eb 6667 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6668 else {
6669 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6670 load_count[0], load_count[1], load_count[2]);
6671 load_count[0]--;
6672 load_count[1 + BP_PORT(bp)]--;
6673 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6674 load_count[0], load_count[1], load_count[2]);
6675 if (load_count[0] == 0)
6676 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6677 else if (load_count[1 + BP_PORT(bp)] == 0)
6678 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6679 else
6680 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6681 }
a2fbb9ea 6682
34f80b04
EG
6683 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6684 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6685 bnx2x__link_reset(bp);
a2fbb9ea
ET
6686
6687 /* Reset the chip */
228241eb 6688 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6689
6690 /* Report UNLOAD_DONE to MCP */
34f80b04 6691 if (!BP_NOMCP(bp))
a2fbb9ea
ET
6692 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6693
7a9b2557 6694 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6695 bnx2x_free_skbs(bp);
7a9b2557
VZ
6696 for_each_queue(bp, i)
6697 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6698 RX_SGE_CNT*NUM_RX_SGE_PAGES);
a2fbb9ea
ET
6699 bnx2x_free_mem(bp);
6700
6701 bp->state = BNX2X_STATE_CLOSED;
228241eb 6702
a2fbb9ea
ET
6703 netif_carrier_off(bp->dev);
6704
6705 return 0;
6706}
6707
34f80b04
EG
6708static void bnx2x_reset_task(struct work_struct *work)
6709{
6710 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6711
6712#ifdef BNX2X_STOP_ON_ERROR
6713 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6714 " so reset not done to allow debug dump,\n"
6715 KERN_ERR " you will need to reboot when done\n");
6716 return;
6717#endif
6718
6719 rtnl_lock();
6720
6721 if (!netif_running(bp->dev))
6722 goto reset_task_exit;
6723
6724 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6725 bnx2x_nic_load(bp, LOAD_NORMAL);
6726
6727reset_task_exit:
6728 rtnl_unlock();
6729}
6730
a2fbb9ea
ET
6731/* end of nic load/unload */
6732
6733/* ethtool_ops */
6734
6735/*
6736 * Init service functions
6737 */
6738
34f80b04
EG
6739static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6740{
6741 u32 val;
6742
6743 /* Check if there is any driver already loaded */
6744 val = REG_RD(bp, MISC_REG_UNPREPARED);
6745 if (val == 0x1) {
6746 /* Check if it is the UNDI driver
6747 * UNDI driver initializes CID offset for normal bell to 0x7
6748 */
6749 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6750 if (val == 0x7) {
6751 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6752 /* save our func and fw_seq */
6753 int func = BP_FUNC(bp);
6754 u16 fw_seq = bp->fw_seq;
6755
6756 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6757
6758 /* try unload UNDI on port 0 */
6759 bp->func = 0;
6760 bp->fw_seq = (SHMEM_RD(bp,
6761 func_mb[bp->func].drv_mb_header) &
6762 DRV_MSG_SEQ_NUMBER_MASK);
6763
6764 reset_code = bnx2x_fw_command(bp, reset_code);
6765 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* if UNDI is loaded on the other port */
6768 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6769
6770 bp->func = 1;
6771 bp->fw_seq = (SHMEM_RD(bp,
6772 func_mb[bp->func].drv_mb_header) &
6773 DRV_MSG_SEQ_NUMBER_MASK);
6774
6775 bnx2x_fw_command(bp,
6776 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6777 bnx2x_fw_command(bp,
6778 DRV_MSG_CODE_UNLOAD_DONE);
6779
6780 /* restore our func and fw_seq */
6781 bp->func = func;
6782 bp->fw_seq = fw_seq;
6783 }
6784
6785 /* reset device */
6786 REG_WR(bp,
6787 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6788 0xd3ffff7f);
6789 REG_WR(bp,
6790 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6791 0x1403);
6792 }
6793 }
6794}
6795
6796static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6797{
6798 u32 val, val2, val3, val4, id;
6799
6800 /* Get the chip revision id and number. */
6801 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6802 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6803 id = ((val & 0xffff) << 16);
6804 val = REG_RD(bp, MISC_REG_CHIP_REV);
6805 id |= ((val & 0xf) << 12);
6806 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6807 id |= ((val & 0xff) << 4);
6808 REG_RD(bp, MISC_REG_BOND_ID);
6809 id |= (val & 0xf);
6810 bp->common.chip_id = id;
6811 bp->link_params.chip_id = bp->common.chip_id;
6812 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6813
6814 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6815 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6816 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6817 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6818 bp->common.flash_size, bp->common.flash_size);
6819
6820 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6821 bp->link_params.shmem_base = bp->common.shmem_base;
6822 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6823
6824 if (!bp->common.shmem_base ||
6825 (bp->common.shmem_base < 0xA0000) ||
6826 (bp->common.shmem_base >= 0xC0000)) {
6827 BNX2X_DEV_INFO("MCP not active\n");
6828 bp->flags |= NO_MCP_FLAG;
6829 return;
6830 }
6831
6832 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6833 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6834 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6835 BNX2X_ERR("BAD MCP validity signature\n");
6836
6837 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6838 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6839
6840 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6841 bp->common.hw_config, bp->common.board);
6842
6843 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6844 SHARED_HW_CFG_LED_MODE_MASK) >>
6845 SHARED_HW_CFG_LED_MODE_SHIFT);
6846
6847 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6848 bp->common.bc_ver = val;
6849 BNX2X_DEV_INFO("bc_ver %X\n", val);
6850 if (val < BNX2X_BC_VER) {
6851 /* for now only warn
6852 * later we might need to enforce this */
6853 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6854 " please upgrade BC\n", BNX2X_BC_VER, val);
6855 }
6856 BNX2X_DEV_INFO("%sWoL Capable\n",
6857 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6858
6859 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6860 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6861 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6862 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6863
6864 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6865 val, val2, val3, val4);
6866}
6867
6868static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6869 u32 switch_cfg)
a2fbb9ea 6870{
34f80b04 6871 int port = BP_PORT(bp);
a2fbb9ea
ET
6872 u32 ext_phy_type;
6873
a2fbb9ea
ET
6874 switch (switch_cfg) {
6875 case SWITCH_CFG_1G:
6876 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6877
c18487ee
YR
6878 ext_phy_type =
6879 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6880 switch (ext_phy_type) {
6881 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6882 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6883 ext_phy_type);
6884
34f80b04
EG
6885 bp->port.supported |= (SUPPORTED_10baseT_Half |
6886 SUPPORTED_10baseT_Full |
6887 SUPPORTED_100baseT_Half |
6888 SUPPORTED_100baseT_Full |
6889 SUPPORTED_1000baseT_Full |
6890 SUPPORTED_2500baseX_Full |
6891 SUPPORTED_TP |
6892 SUPPORTED_FIBRE |
6893 SUPPORTED_Autoneg |
6894 SUPPORTED_Pause |
6895 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6896 break;
6897
6898 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6899 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6900 ext_phy_type);
6901
34f80b04
EG
6902 bp->port.supported |= (SUPPORTED_10baseT_Half |
6903 SUPPORTED_10baseT_Full |
6904 SUPPORTED_100baseT_Half |
6905 SUPPORTED_100baseT_Full |
6906 SUPPORTED_1000baseT_Full |
6907 SUPPORTED_TP |
6908 SUPPORTED_FIBRE |
6909 SUPPORTED_Autoneg |
6910 SUPPORTED_Pause |
6911 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6912 break;
6913
6914 default:
6915 BNX2X_ERR("NVRAM config error. "
6916 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6917 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6918 return;
6919 }
6920
34f80b04
EG
6921 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6922 port*0x10);
6923 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6924 break;
6925
6926 case SWITCH_CFG_10G:
6927 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6928
c18487ee
YR
6929 ext_phy_type =
6930 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6931 switch (ext_phy_type) {
6932 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6933 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6934 ext_phy_type);
6935
34f80b04
EG
6936 bp->port.supported |= (SUPPORTED_10baseT_Half |
6937 SUPPORTED_10baseT_Full |
6938 SUPPORTED_100baseT_Half |
6939 SUPPORTED_100baseT_Full |
6940 SUPPORTED_1000baseT_Full |
6941 SUPPORTED_2500baseX_Full |
6942 SUPPORTED_10000baseT_Full |
6943 SUPPORTED_TP |
6944 SUPPORTED_FIBRE |
6945 SUPPORTED_Autoneg |
6946 SUPPORTED_Pause |
6947 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6948 break;
6949
6950 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 6951 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 6952 ext_phy_type);
f1410647 6953
34f80b04
EG
6954 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6955 SUPPORTED_FIBRE |
6956 SUPPORTED_Pause |
6957 SUPPORTED_Asym_Pause);
f1410647
ET
6958 break;
6959
a2fbb9ea 6960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
6961 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6962 ext_phy_type);
6963
34f80b04
EG
6964 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6965 SUPPORTED_1000baseT_Full |
6966 SUPPORTED_FIBRE |
6967 SUPPORTED_Pause |
6968 SUPPORTED_Asym_Pause);
f1410647
ET
6969 break;
6970
6971 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6972 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
6973 ext_phy_type);
6974
34f80b04
EG
6975 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6976 SUPPORTED_1000baseT_Full |
6977 SUPPORTED_FIBRE |
6978 SUPPORTED_Autoneg |
6979 SUPPORTED_Pause |
6980 SUPPORTED_Asym_Pause);
f1410647
ET
6981 break;
6982
c18487ee
YR
6983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6984 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6985 ext_phy_type);
6986
34f80b04
EG
6987 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6988 SUPPORTED_2500baseX_Full |
6989 SUPPORTED_1000baseT_Full |
6990 SUPPORTED_FIBRE |
6991 SUPPORTED_Autoneg |
6992 SUPPORTED_Pause |
6993 SUPPORTED_Asym_Pause);
c18487ee
YR
6994 break;
6995
f1410647
ET
6996 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6997 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6998 ext_phy_type);
6999
34f80b04
EG
7000 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7001 SUPPORTED_TP |
7002 SUPPORTED_Autoneg |
7003 SUPPORTED_Pause |
7004 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7005 break;
7006
c18487ee
YR
7007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7008 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7009 bp->link_params.ext_phy_config);
7010 break;
7011
a2fbb9ea
ET
7012 default:
7013 BNX2X_ERR("NVRAM config error. "
7014 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7015 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7016 return;
7017 }
7018
34f80b04
EG
7019 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7020 port*0x18);
7021 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7022
a2fbb9ea
ET
7023 break;
7024
7025 default:
7026 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7027 bp->port.link_config);
a2fbb9ea
ET
7028 return;
7029 }
34f80b04 7030 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7031
7032 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7033 if (!(bp->link_params.speed_cap_mask &
7034 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7035 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7036
c18487ee
YR
7037 if (!(bp->link_params.speed_cap_mask &
7038 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7039 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7040
c18487ee
YR
7041 if (!(bp->link_params.speed_cap_mask &
7042 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7043 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7044
c18487ee
YR
7045 if (!(bp->link_params.speed_cap_mask &
7046 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7047 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7048
c18487ee
YR
7049 if (!(bp->link_params.speed_cap_mask &
7050 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7051 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7052 SUPPORTED_1000baseT_Full);
a2fbb9ea 7053
c18487ee
YR
7054 if (!(bp->link_params.speed_cap_mask &
7055 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7056 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7057
c18487ee
YR
7058 if (!(bp->link_params.speed_cap_mask &
7059 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7060 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7061
34f80b04 7062 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7063}
7064
34f80b04 7065static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7066{
c18487ee 7067 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7068
34f80b04 7069 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7070 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7071 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7072 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7073 bp->port.advertising = bp->port.supported;
a2fbb9ea 7074 } else {
c18487ee
YR
7075 u32 ext_phy_type =
7076 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7077
7078 if ((ext_phy_type ==
7079 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7080 (ext_phy_type ==
7081 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7082 /* force 10G, no AN */
c18487ee 7083 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7084 bp->port.advertising =
a2fbb9ea
ET
7085 (ADVERTISED_10000baseT_Full |
7086 ADVERTISED_FIBRE);
7087 break;
7088 }
7089 BNX2X_ERR("NVRAM config error. "
7090 "Invalid link_config 0x%x"
7091 " Autoneg not supported\n",
34f80b04 7092 bp->port.link_config);
a2fbb9ea
ET
7093 return;
7094 }
7095 break;
7096
7097 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7098 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7099 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7100 bp->port.advertising = (ADVERTISED_10baseT_Full |
7101 ADVERTISED_TP);
a2fbb9ea
ET
7102 } else {
7103 BNX2X_ERR("NVRAM config error. "
7104 "Invalid link_config 0x%x"
7105 " speed_cap_mask 0x%x\n",
34f80b04 7106 bp->port.link_config,
c18487ee 7107 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7108 return;
7109 }
7110 break;
7111
7112 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7113 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7114 bp->link_params.req_line_speed = SPEED_10;
7115 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7116 bp->port.advertising = (ADVERTISED_10baseT_Half |
7117 ADVERTISED_TP);
a2fbb9ea
ET
7118 } else {
7119 BNX2X_ERR("NVRAM config error. "
7120 "Invalid link_config 0x%x"
7121 " speed_cap_mask 0x%x\n",
34f80b04 7122 bp->port.link_config,
c18487ee 7123 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7124 return;
7125 }
7126 break;
7127
7128 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7129 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7130 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7131 bp->port.advertising = (ADVERTISED_100baseT_Full |
7132 ADVERTISED_TP);
a2fbb9ea
ET
7133 } else {
7134 BNX2X_ERR("NVRAM config error. "
7135 "Invalid link_config 0x%x"
7136 " speed_cap_mask 0x%x\n",
34f80b04 7137 bp->port.link_config,
c18487ee 7138 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7139 return;
7140 }
7141 break;
7142
7143 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7144 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7145 bp->link_params.req_line_speed = SPEED_100;
7146 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7147 bp->port.advertising = (ADVERTISED_100baseT_Half |
7148 ADVERTISED_TP);
a2fbb9ea
ET
7149 } else {
7150 BNX2X_ERR("NVRAM config error. "
7151 "Invalid link_config 0x%x"
7152 " speed_cap_mask 0x%x\n",
34f80b04 7153 bp->port.link_config,
c18487ee 7154 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7155 return;
7156 }
7157 break;
7158
7159 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7160 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7161 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7162 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7163 ADVERTISED_TP);
a2fbb9ea
ET
7164 } else {
7165 BNX2X_ERR("NVRAM config error. "
7166 "Invalid link_config 0x%x"
7167 " speed_cap_mask 0x%x\n",
34f80b04 7168 bp->port.link_config,
c18487ee 7169 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7170 return;
7171 }
7172 break;
7173
7174 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7175 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7176 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7177 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7178 ADVERTISED_TP);
a2fbb9ea
ET
7179 } else {
7180 BNX2X_ERR("NVRAM config error. "
7181 "Invalid link_config 0x%x"
7182 " speed_cap_mask 0x%x\n",
34f80b04 7183 bp->port.link_config,
c18487ee 7184 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7185 return;
7186 }
7187 break;
7188
7189 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7190 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7191 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7192 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7193 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7194 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7195 ADVERTISED_FIBRE);
a2fbb9ea
ET
7196 } else {
7197 BNX2X_ERR("NVRAM config error. "
7198 "Invalid link_config 0x%x"
7199 " speed_cap_mask 0x%x\n",
34f80b04 7200 bp->port.link_config,
c18487ee 7201 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7202 return;
7203 }
7204 break;
7205
7206 default:
7207 BNX2X_ERR("NVRAM config error. "
7208 "BAD link speed link_config 0x%x\n",
34f80b04 7209 bp->port.link_config);
c18487ee 7210 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7211 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7212 break;
7213 }
a2fbb9ea 7214
34f80b04
EG
7215 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7216 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7217 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7218 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7219 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7220
c18487ee 7221 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7222 " advertising 0x%x\n",
c18487ee
YR
7223 bp->link_params.req_line_speed,
7224 bp->link_params.req_duplex,
34f80b04 7225 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7226}
7227
34f80b04 7228static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7229{
34f80b04
EG
7230 int port = BP_PORT(bp);
7231 u32 val, val2;
a2fbb9ea 7232
c18487ee 7233 bp->link_params.bp = bp;
34f80b04 7234 bp->link_params.port = port;
c18487ee 7235
c18487ee 7236 bp->link_params.serdes_config =
f1410647 7237 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7238 bp->link_params.lane_config =
a2fbb9ea 7239 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7240 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7241 SHMEM_RD(bp,
7242 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7243 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7244 SHMEM_RD(bp,
7245 dev_info.port_hw_config[port].speed_capability_mask);
7246
34f80b04 7247 bp->port.link_config =
a2fbb9ea
ET
7248 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7249
34f80b04
EG
7250 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7251 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7252 " link_config 0x%08x\n",
c18487ee
YR
7253 bp->link_params.serdes_config,
7254 bp->link_params.lane_config,
7255 bp->link_params.ext_phy_config,
34f80b04 7256 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7257
34f80b04 7258 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7259 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7260 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7261
7262 bnx2x_link_settings_requested(bp);
7263
7264 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7265 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7266 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7267 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7268 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7269 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7270 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7271 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7272 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7273 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7274}
7275
7276static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7277{
7278 int func = BP_FUNC(bp);
7279 u32 val, val2;
7280 int rc = 0;
a2fbb9ea 7281
34f80b04 7282 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7283
34f80b04
EG
7284 bp->e1hov = 0;
7285 bp->e1hmf = 0;
7286 if (CHIP_IS_E1H(bp)) {
7287 bp->mf_config =
7288 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7289
34f80b04
EG
7290 val =
7291 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7292 FUNC_MF_CFG_E1HOV_TAG_MASK);
7293 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7294
34f80b04
EG
7295 bp->e1hov = val;
7296 bp->e1hmf = 1;
7297 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7298 "(0x%04x)\n",
7299 func, bp->e1hov, bp->e1hov);
7300 } else {
7301 BNX2X_DEV_INFO("Single function mode\n");
7302 if (BP_E1HVN(bp)) {
7303 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7304 " aborting\n", func);
7305 rc = -EPERM;
7306 }
7307 }
7308 }
a2fbb9ea 7309
34f80b04
EG
7310 if (!BP_NOMCP(bp)) {
7311 bnx2x_get_port_hwinfo(bp);
7312
7313 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7314 DRV_MSG_SEQ_NUMBER_MASK);
7315 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7316 }
7317
7318 if (IS_E1HMF(bp)) {
7319 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7320 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7321 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7322 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7323 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7324 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7325 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7326 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7327 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7328 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7329 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7330 ETH_ALEN);
7331 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7332 ETH_ALEN);
a2fbb9ea 7333 }
34f80b04
EG
7334
7335 return rc;
a2fbb9ea
ET
7336 }
7337
34f80b04
EG
7338 if (BP_NOMCP(bp)) {
7339 /* only supposed to happen on emulation/FPGA */
7340 BNX2X_ERR("warning rendom MAC workaround active\n");
7341 random_ether_addr(bp->dev->dev_addr);
7342 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7343 }
a2fbb9ea 7344
34f80b04
EG
7345 return rc;
7346}
7347
7348static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7349{
7350 int func = BP_FUNC(bp);
7351 int rc;
7352
34f80b04 7353 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7354
34f80b04
EG
7355 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7356 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7357
7358 rc = bnx2x_get_hwinfo(bp);
7359
7360 /* need to reset chip if undi was active */
7361 if (!BP_NOMCP(bp))
7362 bnx2x_undi_unload(bp);
7363
7364 if (CHIP_REV_IS_FPGA(bp))
7365 printk(KERN_ERR PFX "FPGA detected\n");
7366
7367 if (BP_NOMCP(bp) && (func == 0))
7368 printk(KERN_ERR PFX
7369 "MCP disabled, must load devices in order!\n");
7370
7a9b2557
VZ
7371 /* Set TPA flags */
7372 if (disable_tpa) {
7373 bp->flags &= ~TPA_ENABLE_FLAG;
7374 bp->dev->features &= ~NETIF_F_LRO;
7375 } else {
7376 bp->flags |= TPA_ENABLE_FLAG;
7377 bp->dev->features |= NETIF_F_LRO;
7378 }
7379
7380
34f80b04
EG
7381 bp->tx_ring_size = MAX_TX_AVAIL;
7382 bp->rx_ring_size = MAX_RX_AVAIL;
7383
7384 bp->rx_csum = 1;
7385 bp->rx_offset = 0;
7386
7387 bp->tx_ticks = 50;
7388 bp->rx_ticks = 25;
7389
7390 bp->stats_ticks = 1000000 & 0xffff00;
7391
7392 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7393 bp->current_interval = (poll ? poll : bp->timer_interval);
7394
7395 init_timer(&bp->timer);
7396 bp->timer.expires = jiffies + bp->current_interval;
7397 bp->timer.data = (unsigned long) bp;
7398 bp->timer.function = bnx2x_timer;
7399
7400 return rc;
a2fbb9ea
ET
7401}
7402
7403/*
7404 * ethtool service functions
7405 */
7406
7407/* All ethtool functions called with rtnl_lock */
7408
7409static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7410{
7411 struct bnx2x *bp = netdev_priv(dev);
7412
34f80b04
EG
7413 cmd->supported = bp->port.supported;
7414 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7415
7416 if (netif_carrier_ok(dev)) {
c18487ee
YR
7417 cmd->speed = bp->link_vars.line_speed;
7418 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7419 } else {
c18487ee
YR
7420 cmd->speed = bp->link_params.req_line_speed;
7421 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7422 }
34f80b04
EG
7423 if (IS_E1HMF(bp)) {
7424 u16 vn_max_rate;
7425
7426 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7427 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7428 if (vn_max_rate < cmd->speed)
7429 cmd->speed = vn_max_rate;
7430 }
a2fbb9ea 7431
c18487ee
YR
7432 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7433 u32 ext_phy_type =
7434 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7435
7436 switch (ext_phy_type) {
7437 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7438 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7440 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7441 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7442 cmd->port = PORT_FIBRE;
7443 break;
7444
7445 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7446 cmd->port = PORT_TP;
7447 break;
7448
c18487ee
YR
7449 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7450 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7451 bp->link_params.ext_phy_config);
7452 break;
7453
f1410647
ET
7454 default:
7455 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7456 bp->link_params.ext_phy_config);
7457 break;
f1410647
ET
7458 }
7459 } else
a2fbb9ea 7460 cmd->port = PORT_TP;
a2fbb9ea 7461
34f80b04 7462 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7463 cmd->transceiver = XCVR_INTERNAL;
7464
c18487ee 7465 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7466 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7467 else
a2fbb9ea 7468 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7469
7470 cmd->maxtxpkt = 0;
7471 cmd->maxrxpkt = 0;
7472
7473 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7474 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7475 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7476 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7477 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7478 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7479 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7480
7481 return 0;
7482}
7483
7484static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7485{
7486 struct bnx2x *bp = netdev_priv(dev);
7487 u32 advertising;
7488
34f80b04
EG
7489 if (IS_E1HMF(bp))
7490 return 0;
7491
a2fbb9ea
ET
7492 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7493 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7494 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7495 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7496 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7497 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7498 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7499
a2fbb9ea 7500 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7501 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7502 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7503 return -EINVAL;
f1410647 7504 }
a2fbb9ea
ET
7505
7506 /* advertise the requested speed and duplex if supported */
34f80b04 7507 cmd->advertising &= bp->port.supported;
a2fbb9ea 7508
c18487ee
YR
7509 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7510 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7511 bp->port.advertising |= (ADVERTISED_Autoneg |
7512 cmd->advertising);
a2fbb9ea
ET
7513
7514 } else { /* forced speed */
7515 /* advertise the requested speed and duplex if supported */
7516 switch (cmd->speed) {
7517 case SPEED_10:
7518 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7519 if (!(bp->port.supported &
f1410647
ET
7520 SUPPORTED_10baseT_Full)) {
7521 DP(NETIF_MSG_LINK,
7522 "10M full not supported\n");
a2fbb9ea 7523 return -EINVAL;
f1410647 7524 }
a2fbb9ea
ET
7525
7526 advertising = (ADVERTISED_10baseT_Full |
7527 ADVERTISED_TP);
7528 } else {
34f80b04 7529 if (!(bp->port.supported &
f1410647
ET
7530 SUPPORTED_10baseT_Half)) {
7531 DP(NETIF_MSG_LINK,
7532 "10M half not supported\n");
a2fbb9ea 7533 return -EINVAL;
f1410647 7534 }
a2fbb9ea
ET
7535
7536 advertising = (ADVERTISED_10baseT_Half |
7537 ADVERTISED_TP);
7538 }
7539 break;
7540
7541 case SPEED_100:
7542 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7543 if (!(bp->port.supported &
f1410647
ET
7544 SUPPORTED_100baseT_Full)) {
7545 DP(NETIF_MSG_LINK,
7546 "100M full not supported\n");
a2fbb9ea 7547 return -EINVAL;
f1410647 7548 }
a2fbb9ea
ET
7549
7550 advertising = (ADVERTISED_100baseT_Full |
7551 ADVERTISED_TP);
7552 } else {
34f80b04 7553 if (!(bp->port.supported &
f1410647
ET
7554 SUPPORTED_100baseT_Half)) {
7555 DP(NETIF_MSG_LINK,
7556 "100M half not supported\n");
a2fbb9ea 7557 return -EINVAL;
f1410647 7558 }
a2fbb9ea
ET
7559
7560 advertising = (ADVERTISED_100baseT_Half |
7561 ADVERTISED_TP);
7562 }
7563 break;
7564
7565 case SPEED_1000:
f1410647
ET
7566 if (cmd->duplex != DUPLEX_FULL) {
7567 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7568 return -EINVAL;
f1410647 7569 }
a2fbb9ea 7570
34f80b04 7571 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7572 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7573 return -EINVAL;
f1410647 7574 }
a2fbb9ea
ET
7575
7576 advertising = (ADVERTISED_1000baseT_Full |
7577 ADVERTISED_TP);
7578 break;
7579
7580 case SPEED_2500:
f1410647
ET
7581 if (cmd->duplex != DUPLEX_FULL) {
7582 DP(NETIF_MSG_LINK,
7583 "2.5G half not supported\n");
a2fbb9ea 7584 return -EINVAL;
f1410647 7585 }
a2fbb9ea 7586
34f80b04 7587 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7588 DP(NETIF_MSG_LINK,
7589 "2.5G full not supported\n");
a2fbb9ea 7590 return -EINVAL;
f1410647 7591 }
a2fbb9ea 7592
f1410647 7593 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7594 ADVERTISED_TP);
7595 break;
7596
7597 case SPEED_10000:
f1410647
ET
7598 if (cmd->duplex != DUPLEX_FULL) {
7599 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7600 return -EINVAL;
f1410647 7601 }
a2fbb9ea 7602
34f80b04 7603 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7604 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7605 return -EINVAL;
f1410647 7606 }
a2fbb9ea
ET
7607
7608 advertising = (ADVERTISED_10000baseT_Full |
7609 ADVERTISED_FIBRE);
7610 break;
7611
7612 default:
f1410647 7613 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7614 return -EINVAL;
7615 }
7616
c18487ee
YR
7617 bp->link_params.req_line_speed = cmd->speed;
7618 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7619 bp->port.advertising = advertising;
a2fbb9ea
ET
7620 }
7621
c18487ee 7622 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7623 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7624 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7625 bp->port.advertising);
a2fbb9ea 7626
34f80b04 7627 if (netif_running(dev)) {
bb2a0f7a 7628 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7629 bnx2x_link_set(bp);
7630 }
a2fbb9ea
ET
7631
7632 return 0;
7633}
7634
c18487ee
YR
7635#define PHY_FW_VER_LEN 10
7636
a2fbb9ea
ET
7637static void bnx2x_get_drvinfo(struct net_device *dev,
7638 struct ethtool_drvinfo *info)
7639{
7640 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7641 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7642
7643 strcpy(info->driver, DRV_MODULE_NAME);
7644 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7645
7646 phy_fw_ver[0] = '\0';
34f80b04
EG
7647 if (bp->port.pmf) {
7648 bnx2x_phy_hw_lock(bp);
7649 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7650 (bp->state != BNX2X_STATE_CLOSED),
7651 phy_fw_ver, PHY_FW_VER_LEN);
7652 bnx2x_phy_hw_unlock(bp);
7653 }
c18487ee
YR
7654
7655 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7656 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7657 BCM_5710_FW_REVISION_VERSION,
34f80b04 7658 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7659 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
7660 strcpy(info->bus_info, pci_name(bp->pdev));
7661 info->n_stats = BNX2X_NUM_STATS;
7662 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7663 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7664 info->regdump_len = 0;
7665}
7666
7667static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7668{
7669 struct bnx2x *bp = netdev_priv(dev);
7670
7671 if (bp->flags & NO_WOL_FLAG) {
7672 wol->supported = 0;
7673 wol->wolopts = 0;
7674 } else {
7675 wol->supported = WAKE_MAGIC;
7676 if (bp->wol)
7677 wol->wolopts = WAKE_MAGIC;
7678 else
7679 wol->wolopts = 0;
7680 }
7681 memset(&wol->sopass, 0, sizeof(wol->sopass));
7682}
7683
7684static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7685{
7686 struct bnx2x *bp = netdev_priv(dev);
7687
7688 if (wol->wolopts & ~WAKE_MAGIC)
7689 return -EINVAL;
7690
7691 if (wol->wolopts & WAKE_MAGIC) {
7692 if (bp->flags & NO_WOL_FLAG)
7693 return -EINVAL;
7694
7695 bp->wol = 1;
34f80b04 7696 } else
a2fbb9ea 7697 bp->wol = 0;
34f80b04 7698
a2fbb9ea
ET
7699 return 0;
7700}
7701
7702static u32 bnx2x_get_msglevel(struct net_device *dev)
7703{
7704 struct bnx2x *bp = netdev_priv(dev);
7705
7706 return bp->msglevel;
7707}
7708
7709static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7710{
7711 struct bnx2x *bp = netdev_priv(dev);
7712
7713 if (capable(CAP_NET_ADMIN))
7714 bp->msglevel = level;
7715}
7716
7717static int bnx2x_nway_reset(struct net_device *dev)
7718{
7719 struct bnx2x *bp = netdev_priv(dev);
7720
34f80b04
EG
7721 if (!bp->port.pmf)
7722 return 0;
a2fbb9ea 7723
34f80b04 7724 if (netif_running(dev)) {
bb2a0f7a 7725 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7726 bnx2x_link_set(bp);
7727 }
a2fbb9ea
ET
7728
7729 return 0;
7730}
7731
7732static int bnx2x_get_eeprom_len(struct net_device *dev)
7733{
7734 struct bnx2x *bp = netdev_priv(dev);
7735
34f80b04 7736 return bp->common.flash_size;
a2fbb9ea
ET
7737}
7738
7739static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7740{
34f80b04 7741 int port = BP_PORT(bp);
a2fbb9ea
ET
7742 int count, i;
7743 u32 val = 0;
7744
7745 /* adjust timeout for emulation/FPGA */
7746 count = NVRAM_TIMEOUT_COUNT;
7747 if (CHIP_REV_IS_SLOW(bp))
7748 count *= 100;
7749
7750 /* request access to nvram interface */
7751 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7752 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7753
7754 for (i = 0; i < count*10; i++) {
7755 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7756 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7757 break;
7758
7759 udelay(5);
7760 }
7761
7762 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7763 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7764 return -EBUSY;
7765 }
7766
7767 return 0;
7768}
7769
7770static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7771{
34f80b04 7772 int port = BP_PORT(bp);
a2fbb9ea
ET
7773 int count, i;
7774 u32 val = 0;
7775
7776 /* adjust timeout for emulation/FPGA */
7777 count = NVRAM_TIMEOUT_COUNT;
7778 if (CHIP_REV_IS_SLOW(bp))
7779 count *= 100;
7780
7781 /* relinquish nvram interface */
7782 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7783 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7784
7785 for (i = 0; i < count*10; i++) {
7786 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7787 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7788 break;
7789
7790 udelay(5);
7791 }
7792
7793 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7794 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7795 return -EBUSY;
7796 }
7797
7798 return 0;
7799}
7800
7801static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7802{
7803 u32 val;
7804
7805 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7806
7807 /* enable both bits, even on read */
7808 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7809 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7810 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7811}
7812
7813static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7814{
7815 u32 val;
7816
7817 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7818
7819 /* disable both bits, even after read */
7820 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7821 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7822 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7823}
7824
7825static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7826 u32 cmd_flags)
7827{
f1410647 7828 int count, i, rc;
a2fbb9ea
ET
7829 u32 val;
7830
7831 /* build the command word */
7832 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7833
7834 /* need to clear DONE bit separately */
7835 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7836
7837 /* address of the NVRAM to read from */
7838 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7839 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7840
7841 /* issue a read command */
7842 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7843
7844 /* adjust timeout for emulation/FPGA */
7845 count = NVRAM_TIMEOUT_COUNT;
7846 if (CHIP_REV_IS_SLOW(bp))
7847 count *= 100;
7848
7849 /* wait for completion */
7850 *ret_val = 0;
7851 rc = -EBUSY;
7852 for (i = 0; i < count; i++) {
7853 udelay(5);
7854 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7855
7856 if (val & MCPR_NVM_COMMAND_DONE) {
7857 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7858 /* we read nvram data in cpu order
7859 * but ethtool sees it as an array of bytes
7860 * converting to big-endian will do the work */
7861 val = cpu_to_be32(val);
7862 *ret_val = val;
7863 rc = 0;
7864 break;
7865 }
7866 }
7867
7868 return rc;
7869}
7870
7871static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7872 int buf_size)
7873{
7874 int rc;
7875 u32 cmd_flags;
7876 u32 val;
7877
7878 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7879 DP(BNX2X_MSG_NVM,
c14423fe 7880 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7881 offset, buf_size);
7882 return -EINVAL;
7883 }
7884
34f80b04
EG
7885 if (offset + buf_size > bp->common.flash_size) {
7886 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7887 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7888 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7889 return -EINVAL;
7890 }
7891
7892 /* request access to nvram interface */
7893 rc = bnx2x_acquire_nvram_lock(bp);
7894 if (rc)
7895 return rc;
7896
7897 /* enable access to nvram interface */
7898 bnx2x_enable_nvram_access(bp);
7899
7900 /* read the first word(s) */
7901 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7902 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7903 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7904 memcpy(ret_buf, &val, 4);
7905
7906 /* advance to the next dword */
7907 offset += sizeof(u32);
7908 ret_buf += sizeof(u32);
7909 buf_size -= sizeof(u32);
7910 cmd_flags = 0;
7911 }
7912
7913 if (rc == 0) {
7914 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7915 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7916 memcpy(ret_buf, &val, 4);
7917 }
7918
7919 /* disable access to nvram interface */
7920 bnx2x_disable_nvram_access(bp);
7921 bnx2x_release_nvram_lock(bp);
7922
7923 return rc;
7924}
7925
7926static int bnx2x_get_eeprom(struct net_device *dev,
7927 struct ethtool_eeprom *eeprom, u8 *eebuf)
7928{
7929 struct bnx2x *bp = netdev_priv(dev);
7930 int rc;
7931
34f80b04 7932 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
7933 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7934 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7935 eeprom->len, eeprom->len);
7936
7937 /* parameters already validated in ethtool_get_eeprom */
7938
7939 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7940
7941 return rc;
7942}
7943
7944static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7945 u32 cmd_flags)
7946{
f1410647 7947 int count, i, rc;
a2fbb9ea
ET
7948
7949 /* build the command word */
7950 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7951
7952 /* need to clear DONE bit separately */
7953 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7954
7955 /* write the data */
7956 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7957
7958 /* address of the NVRAM to write to */
7959 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7960 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7961
7962 /* issue the write command */
7963 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7964
7965 /* adjust timeout for emulation/FPGA */
7966 count = NVRAM_TIMEOUT_COUNT;
7967 if (CHIP_REV_IS_SLOW(bp))
7968 count *= 100;
7969
7970 /* wait for completion */
7971 rc = -EBUSY;
7972 for (i = 0; i < count; i++) {
7973 udelay(5);
7974 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7975 if (val & MCPR_NVM_COMMAND_DONE) {
7976 rc = 0;
7977 break;
7978 }
7979 }
7980
7981 return rc;
7982}
7983
f1410647 7984#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
7985
7986static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7987 int buf_size)
7988{
7989 int rc;
7990 u32 cmd_flags;
7991 u32 align_offset;
7992 u32 val;
7993
34f80b04
EG
7994 if (offset + buf_size > bp->common.flash_size) {
7995 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7996 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7997 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7998 return -EINVAL;
7999 }
8000
8001 /* request access to nvram interface */
8002 rc = bnx2x_acquire_nvram_lock(bp);
8003 if (rc)
8004 return rc;
8005
8006 /* enable access to nvram interface */
8007 bnx2x_enable_nvram_access(bp);
8008
8009 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8010 align_offset = (offset & ~0x03);
8011 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8012
8013 if (rc == 0) {
8014 val &= ~(0xff << BYTE_OFFSET(offset));
8015 val |= (*data_buf << BYTE_OFFSET(offset));
8016
8017 /* nvram data is returned as an array of bytes
8018 * convert it back to cpu order */
8019 val = be32_to_cpu(val);
8020
a2fbb9ea
ET
8021 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8022 cmd_flags);
8023 }
8024
8025 /* disable access to nvram interface */
8026 bnx2x_disable_nvram_access(bp);
8027 bnx2x_release_nvram_lock(bp);
8028
8029 return rc;
8030}
8031
8032static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8033 int buf_size)
8034{
8035 int rc;
8036 u32 cmd_flags;
8037 u32 val;
8038 u32 written_so_far;
8039
34f80b04 8040 if (buf_size == 1) /* ethtool */
a2fbb9ea 8041 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8042
8043 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8044 DP(BNX2X_MSG_NVM,
c14423fe 8045 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8046 offset, buf_size);
8047 return -EINVAL;
8048 }
8049
34f80b04
EG
8050 if (offset + buf_size > bp->common.flash_size) {
8051 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8052 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8053 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8054 return -EINVAL;
8055 }
8056
8057 /* request access to nvram interface */
8058 rc = bnx2x_acquire_nvram_lock(bp);
8059 if (rc)
8060 return rc;
8061
8062 /* enable access to nvram interface */
8063 bnx2x_enable_nvram_access(bp);
8064
8065 written_so_far = 0;
8066 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8067 while ((written_so_far < buf_size) && (rc == 0)) {
8068 if (written_so_far == (buf_size - sizeof(u32)))
8069 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8070 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8071 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8072 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8073 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8074
8075 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8076
8077 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8078
8079 /* advance to the next dword */
8080 offset += sizeof(u32);
8081 data_buf += sizeof(u32);
8082 written_so_far += sizeof(u32);
8083 cmd_flags = 0;
8084 }
8085
8086 /* disable access to nvram interface */
8087 bnx2x_disable_nvram_access(bp);
8088 bnx2x_release_nvram_lock(bp);
8089
8090 return rc;
8091}
8092
8093static int bnx2x_set_eeprom(struct net_device *dev,
8094 struct ethtool_eeprom *eeprom, u8 *eebuf)
8095{
8096 struct bnx2x *bp = netdev_priv(dev);
8097 int rc;
8098
34f80b04 8099 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8100 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8101 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8102 eeprom->len, eeprom->len);
8103
8104 /* parameters already validated in ethtool_set_eeprom */
8105
c18487ee 8106 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8107 if (eeprom->magic == 0x00504859)
8108 if (bp->port.pmf) {
8109
8110 bnx2x_phy_hw_lock(bp);
8111 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8112 bp->link_params.ext_phy_config,
8113 (bp->state != BNX2X_STATE_CLOSED),
8114 eebuf, eeprom->len);
bb2a0f7a
YG
8115 if ((bp->state == BNX2X_STATE_OPEN) ||
8116 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8117 rc |= bnx2x_link_reset(&bp->link_params,
8118 &bp->link_vars);
8119 rc |= bnx2x_phy_init(&bp->link_params,
8120 &bp->link_vars);
bb2a0f7a 8121 }
34f80b04
EG
8122 bnx2x_phy_hw_unlock(bp);
8123
8124 } else /* Only the PMF can access the PHY */
8125 return -EINVAL;
8126 else
c18487ee 8127 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8128
8129 return rc;
8130}
8131
8132static int bnx2x_get_coalesce(struct net_device *dev,
8133 struct ethtool_coalesce *coal)
8134{
8135 struct bnx2x *bp = netdev_priv(dev);
8136
8137 memset(coal, 0, sizeof(struct ethtool_coalesce));
8138
8139 coal->rx_coalesce_usecs = bp->rx_ticks;
8140 coal->tx_coalesce_usecs = bp->tx_ticks;
8141 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8142
8143 return 0;
8144}
8145
8146static int bnx2x_set_coalesce(struct net_device *dev,
8147 struct ethtool_coalesce *coal)
8148{
8149 struct bnx2x *bp = netdev_priv(dev);
8150
8151 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8152 if (bp->rx_ticks > 3000)
8153 bp->rx_ticks = 3000;
8154
8155 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8156 if (bp->tx_ticks > 0x3000)
8157 bp->tx_ticks = 0x3000;
8158
8159 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8160 if (bp->stats_ticks > 0xffff00)
8161 bp->stats_ticks = 0xffff00;
8162 bp->stats_ticks &= 0xffff00;
8163
34f80b04 8164 if (netif_running(dev))
a2fbb9ea
ET
8165 bnx2x_update_coalesce(bp);
8166
8167 return 0;
8168}
8169
7a9b2557
VZ
8170static int bnx2x_set_flags(struct net_device *dev, u32 data)
8171{
8172 struct bnx2x *bp = netdev_priv(dev);
8173 int changed = 0;
8174 int rc = 0;
8175
8176 if (data & ETH_FLAG_LRO) {
8177 if (!(dev->features & NETIF_F_LRO)) {
8178 dev->features |= NETIF_F_LRO;
8179 bp->flags |= TPA_ENABLE_FLAG;
8180 changed = 1;
8181 }
8182
8183 } else if (dev->features & NETIF_F_LRO) {
8184 dev->features &= ~NETIF_F_LRO;
8185 bp->flags &= ~TPA_ENABLE_FLAG;
8186 changed = 1;
8187 }
8188
8189 if (changed && netif_running(dev)) {
8190 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8191 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8192 }
8193
8194 return rc;
8195}
8196
a2fbb9ea
ET
8197static void bnx2x_get_ringparam(struct net_device *dev,
8198 struct ethtool_ringparam *ering)
8199{
8200 struct bnx2x *bp = netdev_priv(dev);
8201
8202 ering->rx_max_pending = MAX_RX_AVAIL;
8203 ering->rx_mini_max_pending = 0;
8204 ering->rx_jumbo_max_pending = 0;
8205
8206 ering->rx_pending = bp->rx_ring_size;
8207 ering->rx_mini_pending = 0;
8208 ering->rx_jumbo_pending = 0;
8209
8210 ering->tx_max_pending = MAX_TX_AVAIL;
8211 ering->tx_pending = bp->tx_ring_size;
8212}
8213
8214static int bnx2x_set_ringparam(struct net_device *dev,
8215 struct ethtool_ringparam *ering)
8216{
8217 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8218 int rc = 0;
a2fbb9ea
ET
8219
8220 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8221 (ering->tx_pending > MAX_TX_AVAIL) ||
8222 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8223 return -EINVAL;
8224
8225 bp->rx_ring_size = ering->rx_pending;
8226 bp->tx_ring_size = ering->tx_pending;
8227
34f80b04
EG
8228 if (netif_running(dev)) {
8229 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8230 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8231 }
8232
34f80b04 8233 return rc;
a2fbb9ea
ET
8234}
8235
8236static void bnx2x_get_pauseparam(struct net_device *dev,
8237 struct ethtool_pauseparam *epause)
8238{
8239 struct bnx2x *bp = netdev_priv(dev);
8240
c18487ee
YR
8241 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8242 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8243
8244 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8245 FLOW_CTRL_RX);
8246 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8247 FLOW_CTRL_TX);
a2fbb9ea
ET
8248
8249 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8250 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8251 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8252}
8253
8254static int bnx2x_set_pauseparam(struct net_device *dev,
8255 struct ethtool_pauseparam *epause)
8256{
8257 struct bnx2x *bp = netdev_priv(dev);
8258
34f80b04
EG
8259 if (IS_E1HMF(bp))
8260 return 0;
8261
a2fbb9ea
ET
8262 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8263 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8264 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8265
c18487ee 8266 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8267
f1410647 8268 if (epause->rx_pause)
c18487ee
YR
8269 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8270
f1410647 8271 if (epause->tx_pause)
c18487ee
YR
8272 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8273
8274 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8275 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8276
c18487ee 8277 if (epause->autoneg) {
34f80b04 8278 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8279 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8280 return -EINVAL;
8281 }
a2fbb9ea 8282
c18487ee
YR
8283 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8284 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8285 }
a2fbb9ea 8286
c18487ee
YR
8287 DP(NETIF_MSG_LINK,
8288 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8289
8290 if (netif_running(dev)) {
bb2a0f7a 8291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8292 bnx2x_link_set(bp);
8293 }
a2fbb9ea
ET
8294
8295 return 0;
8296}
8297
8298static u32 bnx2x_get_rx_csum(struct net_device *dev)
8299{
8300 struct bnx2x *bp = netdev_priv(dev);
8301
8302 return bp->rx_csum;
8303}
8304
8305static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8306{
8307 struct bnx2x *bp = netdev_priv(dev);
8308
8309 bp->rx_csum = data;
8310 return 0;
8311}
8312
8313static int bnx2x_set_tso(struct net_device *dev, u32 data)
8314{
755735eb 8315 if (data) {
a2fbb9ea 8316 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8317 dev->features |= NETIF_F_TSO6;
8318 } else {
a2fbb9ea 8319 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8320 dev->features &= ~NETIF_F_TSO6;
8321 }
8322
a2fbb9ea
ET
8323 return 0;
8324}
8325
f3c87cdd 8326static const struct {
a2fbb9ea
ET
8327 char string[ETH_GSTRING_LEN];
8328} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8329 { "register_test (offline)" },
8330 { "memory_test (offline)" },
8331 { "loopback_test (offline)" },
8332 { "nvram_test (online)" },
8333 { "interrupt_test (online)" },
8334 { "link_test (online)" },
8335 { "idle check (online)" },
8336 { "MC errors (online)" }
a2fbb9ea
ET
8337};
8338
8339static int bnx2x_self_test_count(struct net_device *dev)
8340{
8341 return BNX2X_NUM_TESTS;
8342}
8343
f3c87cdd
YG
8344static int bnx2x_test_registers(struct bnx2x *bp)
8345{
8346 int idx, i, rc = -ENODEV;
8347 u32 wr_val = 0;
8348 static const struct {
8349 u32 offset0;
8350 u32 offset1;
8351 u32 mask;
8352 } reg_tbl[] = {
8353/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8354 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8355 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8356 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8357 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8358 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8359 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8360 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8361 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8362 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8363/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8364 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8365 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8366 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8367 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8368 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8369 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8370 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8371 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8372 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8373/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8374 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8375 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8376 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8377 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8378 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8379 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8380 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8381 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8382 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8383/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8384 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8385 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8386 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8387 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8388 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8389 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8390 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8391
8392 { 0xffffffff, 0, 0x00000000 }
8393 };
8394
8395 if (!netif_running(bp->dev))
8396 return rc;
8397
8398 /* Repeat the test twice:
8399 First by writing 0x00000000, second by writing 0xffffffff */
8400 for (idx = 0; idx < 2; idx++) {
8401
8402 switch (idx) {
8403 case 0:
8404 wr_val = 0;
8405 break;
8406 case 1:
8407 wr_val = 0xffffffff;
8408 break;
8409 }
8410
8411 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8412 u32 offset, mask, save_val, val;
8413 int port = BP_PORT(bp);
8414
8415 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8416 mask = reg_tbl[i].mask;
8417
8418 save_val = REG_RD(bp, offset);
8419
8420 REG_WR(bp, offset, wr_val);
8421 val = REG_RD(bp, offset);
8422
8423 /* Restore the original register's value */
8424 REG_WR(bp, offset, save_val);
8425
8426 /* verify that value is as expected value */
8427 if ((val & mask) != (wr_val & mask))
8428 goto test_reg_exit;
8429 }
8430 }
8431
8432 rc = 0;
8433
8434test_reg_exit:
8435 return rc;
8436}
8437
8438static int bnx2x_test_memory(struct bnx2x *bp)
8439{
8440 int i, j, rc = -ENODEV;
8441 u32 val;
8442 static const struct {
8443 u32 offset;
8444 int size;
8445 } mem_tbl[] = {
8446 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8447 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8448 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8449 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8450 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8451 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8452 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8453
8454 { 0xffffffff, 0 }
8455 };
8456 static const struct {
8457 char *name;
8458 u32 offset;
8459 u32 mask;
8460 } prty_tbl[] = {
8461 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8462 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8463 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8464 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8465 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8466 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8467
8468 { NULL, 0xffffffff, 0 }
8469 };
8470
8471 if (!netif_running(bp->dev))
8472 return rc;
8473
8474 /* Go through all the memories */
8475 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8476 for (j = 0; j < mem_tbl[i].size; j++)
8477 REG_RD(bp, mem_tbl[i].offset + j*4);
8478
8479 /* Check the parity status */
8480 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8481 val = REG_RD(bp, prty_tbl[i].offset);
8482 if (val & ~(prty_tbl[i].mask)) {
8483 DP(NETIF_MSG_HW,
8484 "%s is 0x%x\n", prty_tbl[i].name, val);
8485 goto test_mem_exit;
8486 }
8487 }
8488
8489 rc = 0;
8490
8491test_mem_exit:
8492 return rc;
8493}
8494
8495static void bnx2x_netif_start(struct bnx2x *bp)
8496{
8497 int i;
8498
8499 if (atomic_dec_and_test(&bp->intr_sem)) {
8500 if (netif_running(bp->dev)) {
8501 bnx2x_int_enable(bp);
8502 for_each_queue(bp, i)
8503 napi_enable(&bnx2x_fp(bp, i, napi));
8504 if (bp->state == BNX2X_STATE_OPEN)
8505 netif_wake_queue(bp->dev);
8506 }
8507 }
8508}
8509
8510static void bnx2x_netif_stop(struct bnx2x *bp)
8511{
8512 int i;
8513
8514 if (netif_running(bp->dev)) {
8515 netif_tx_disable(bp->dev);
8516 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8517 for_each_queue(bp, i)
8518 napi_disable(&bnx2x_fp(bp, i, napi));
8519 }
8520 bnx2x_int_disable_sync(bp);
8521}
8522
8523static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8524{
8525 int cnt = 1000;
8526
8527 if (link_up)
8528 while (bnx2x_link_test(bp) && cnt--)
8529 msleep(10);
8530}
8531
8532static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8533{
8534 unsigned int pkt_size, num_pkts, i;
8535 struct sk_buff *skb;
8536 unsigned char *packet;
8537 struct bnx2x_fastpath *fp = &bp->fp[0];
8538 u16 tx_start_idx, tx_idx;
8539 u16 rx_start_idx, rx_idx;
8540 u16 pkt_prod;
8541 struct sw_tx_bd *tx_buf;
8542 struct eth_tx_bd *tx_bd;
8543 dma_addr_t mapping;
8544 union eth_rx_cqe *cqe;
8545 u8 cqe_fp_flags;
8546 struct sw_rx_bd *rx_buf;
8547 u16 len;
8548 int rc = -ENODEV;
8549
8550 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8551 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8552 bnx2x_phy_hw_lock(bp);
8553 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8554 bnx2x_phy_hw_unlock(bp);
8555
8556 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8557 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8558 bnx2x_phy_hw_lock(bp);
8559 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8560 bnx2x_phy_hw_unlock(bp);
8561 /* wait until link state is restored */
8562 bnx2x_wait_for_link(bp, link_up);
8563
8564 } else
8565 return -EINVAL;
8566
8567 pkt_size = 1514;
8568 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8569 if (!skb) {
8570 rc = -ENOMEM;
8571 goto test_loopback_exit;
8572 }
8573 packet = skb_put(skb, pkt_size);
8574 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8575 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8576 for (i = ETH_HLEN; i < pkt_size; i++)
8577 packet[i] = (unsigned char) (i & 0xff);
8578
8579 num_pkts = 0;
8580 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8581 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8582
8583 pkt_prod = fp->tx_pkt_prod++;
8584 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8585 tx_buf->first_bd = fp->tx_bd_prod;
8586 tx_buf->skb = skb;
8587
8588 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8589 mapping = pci_map_single(bp->pdev, skb->data,
8590 skb_headlen(skb), PCI_DMA_TODEVICE);
8591 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8592 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8593 tx_bd->nbd = cpu_to_le16(1);
8594 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8595 tx_bd->vlan = cpu_to_le16(pkt_prod);
8596 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8597 ETH_TX_BD_FLAGS_END_BD);
8598 tx_bd->general_data = ((UNICAST_ADDRESS <<
8599 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8600
8601 fp->hw_tx_prods->bds_prod =
8602 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8603 mb(); /* FW restriction: must not reorder writing nbd and packets */
8604 fp->hw_tx_prods->packets_prod =
8605 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8606 DOORBELL(bp, FP_IDX(fp), 0);
8607
8608 mmiowb();
8609
8610 num_pkts++;
8611 fp->tx_bd_prod++;
8612 bp->dev->trans_start = jiffies;
8613
8614 udelay(100);
8615
8616 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8617 if (tx_idx != tx_start_idx + num_pkts)
8618 goto test_loopback_exit;
8619
8620 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8621 if (rx_idx != rx_start_idx + num_pkts)
8622 goto test_loopback_exit;
8623
8624 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8625 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8626 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8627 goto test_loopback_rx_exit;
8628
8629 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8630 if (len != pkt_size)
8631 goto test_loopback_rx_exit;
8632
8633 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8634 skb = rx_buf->skb;
8635 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8636 for (i = ETH_HLEN; i < pkt_size; i++)
8637 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8638 goto test_loopback_rx_exit;
8639
8640 rc = 0;
8641
8642test_loopback_rx_exit:
8643 bp->dev->last_rx = jiffies;
8644
8645 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8646 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8647 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8648 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8649
8650 /* Update producers */
8651 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8652 fp->rx_sge_prod);
8653 mmiowb(); /* keep prod updates ordered */
8654
8655test_loopback_exit:
8656 bp->link_params.loopback_mode = LOOPBACK_NONE;
8657
8658 return rc;
8659}
8660
8661static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8662{
8663 int rc = 0;
8664
8665 if (!netif_running(bp->dev))
8666 return BNX2X_LOOPBACK_FAILED;
8667
8668 bnx2x_netif_stop(bp);
8669
8670 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8671 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8672 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8673 }
8674
8675 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8676 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8677 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8678 }
8679
8680 bnx2x_netif_start(bp);
8681
8682 return rc;
8683}
8684
8685#define CRC32_RESIDUAL 0xdebb20e3
8686
8687static int bnx2x_test_nvram(struct bnx2x *bp)
8688{
8689 static const struct {
8690 int offset;
8691 int size;
8692 } nvram_tbl[] = {
8693 { 0, 0x14 }, /* bootstrap */
8694 { 0x14, 0xec }, /* dir */
8695 { 0x100, 0x350 }, /* manuf_info */
8696 { 0x450, 0xf0 }, /* feature_info */
8697 { 0x640, 0x64 }, /* upgrade_key_info */
8698 { 0x6a4, 0x64 },
8699 { 0x708, 0x70 }, /* manuf_key_info */
8700 { 0x778, 0x70 },
8701 { 0, 0 }
8702 };
8703 u32 buf[0x350 / 4];
8704 u8 *data = (u8 *)buf;
8705 int i, rc;
8706 u32 magic, csum;
8707
8708 rc = bnx2x_nvram_read(bp, 0, data, 4);
8709 if (rc) {
8710 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8711 goto test_nvram_exit;
8712 }
8713
8714 magic = be32_to_cpu(buf[0]);
8715 if (magic != 0x669955aa) {
8716 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8717 rc = -ENODEV;
8718 goto test_nvram_exit;
8719 }
8720
8721 for (i = 0; nvram_tbl[i].size; i++) {
8722
8723 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8724 nvram_tbl[i].size);
8725 if (rc) {
8726 DP(NETIF_MSG_PROBE,
8727 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8728 goto test_nvram_exit;
8729 }
8730
8731 csum = ether_crc_le(nvram_tbl[i].size, data);
8732 if (csum != CRC32_RESIDUAL) {
8733 DP(NETIF_MSG_PROBE,
8734 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8735 rc = -ENODEV;
8736 goto test_nvram_exit;
8737 }
8738 }
8739
8740test_nvram_exit:
8741 return rc;
8742}
8743
8744static int bnx2x_test_intr(struct bnx2x *bp)
8745{
8746 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8747 int i, rc;
8748
8749 if (!netif_running(bp->dev))
8750 return -ENODEV;
8751
8752 config->hdr.length_6b = 0;
8753 config->hdr.offset = 0;
8754 config->hdr.client_id = BP_CL_ID(bp);
8755 config->hdr.reserved1 = 0;
8756
8757 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8758 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8759 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8760 if (rc == 0) {
8761 bp->set_mac_pending++;
8762 for (i = 0; i < 10; i++) {
8763 if (!bp->set_mac_pending)
8764 break;
8765 msleep_interruptible(10);
8766 }
8767 if (i == 10)
8768 rc = -ENODEV;
8769 }
8770
8771 return rc;
8772}
8773
a2fbb9ea
ET
8774static void bnx2x_self_test(struct net_device *dev,
8775 struct ethtool_test *etest, u64 *buf)
8776{
8777 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8778
8779 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8780
f3c87cdd 8781 if (!netif_running(dev))
a2fbb9ea 8782 return;
a2fbb9ea 8783
f3c87cdd
YG
8784 /* offline tests are not suppoerted in MF mode */
8785 if (IS_E1HMF(bp))
8786 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8787
8788 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8789 u8 link_up;
8790
8791 link_up = bp->link_vars.link_up;
8792 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8793 bnx2x_nic_load(bp, LOAD_DIAG);
8794 /* wait until link state is restored */
8795 bnx2x_wait_for_link(bp, link_up);
8796
8797 if (bnx2x_test_registers(bp) != 0) {
8798 buf[0] = 1;
8799 etest->flags |= ETH_TEST_FL_FAILED;
8800 }
8801 if (bnx2x_test_memory(bp) != 0) {
8802 buf[1] = 1;
8803 etest->flags |= ETH_TEST_FL_FAILED;
8804 }
8805 buf[2] = bnx2x_test_loopback(bp, link_up);
8806 if (buf[2] != 0)
8807 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8808
f3c87cdd
YG
8809 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8810 bnx2x_nic_load(bp, LOAD_NORMAL);
8811 /* wait until link state is restored */
8812 bnx2x_wait_for_link(bp, link_up);
8813 }
8814 if (bnx2x_test_nvram(bp) != 0) {
8815 buf[3] = 1;
a2fbb9ea
ET
8816 etest->flags |= ETH_TEST_FL_FAILED;
8817 }
f3c87cdd
YG
8818 if (bnx2x_test_intr(bp) != 0) {
8819 buf[4] = 1;
8820 etest->flags |= ETH_TEST_FL_FAILED;
8821 }
8822 if (bp->port.pmf)
8823 if (bnx2x_link_test(bp) != 0) {
8824 buf[5] = 1;
8825 etest->flags |= ETH_TEST_FL_FAILED;
8826 }
8827 buf[7] = bnx2x_mc_assert(bp);
8828 if (buf[7] != 0)
8829 etest->flags |= ETH_TEST_FL_FAILED;
8830
8831#ifdef BNX2X_EXTRA_DEBUG
8832 bnx2x_panic_dump(bp);
8833#endif
a2fbb9ea
ET
8834}
8835
bb2a0f7a
YG
8836static const struct {
8837 long offset;
8838 int size;
8839 u32 flags;
a2fbb9ea 8840 char string[ETH_GSTRING_LEN];
bb2a0f7a
YG
8841} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8842/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
8843 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
8844 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
8845 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
8846 { STATS_OFFSET32(total_unicast_packets_received_hi),
8847 8, 1, "rx_ucast_packets" },
8848 { STATS_OFFSET32(total_multicast_packets_received_hi),
8849 8, 1, "rx_mcast_packets" },
8850 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8851 8, 1, "rx_bcast_packets" },
8852 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8853 8, 1, "tx_packets" },
8854 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8855 8, 0, "tx_mac_errors" },
8856/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8857 8, 0, "tx_carrier_errors" },
8858 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8859 8, 0, "rx_crc_errors" },
8860 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8861 8, 0, "rx_align_errors" },
8862 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8863 8, 0, "tx_single_collisions" },
8864 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8865 8, 0, "tx_multi_collisions" },
8866 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8867 8, 0, "tx_deferred" },
8868 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8869 8, 0, "tx_excess_collisions" },
8870 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8871 8, 0, "tx_late_collisions" },
8872 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8873 8, 0, "tx_total_collisions" },
8874 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8875 8, 0, "rx_fragments" },
8876/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
8877 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8878 8, 0, "rx_undersize_packets" },
8879 { STATS_OFFSET32(jabber_packets_received),
8880 4, 1, "rx_oversize_packets" },
8881 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8882 8, 0, "tx_64_byte_packets" },
8883 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8884 8, 0, "tx_65_to_127_byte_packets" },
8885 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8886 8, 0, "tx_128_to_255_byte_packets" },
8887 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8888 8, 0, "tx_256_to_511_byte_packets" },
8889 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8890 8, 0, "tx_512_to_1023_byte_packets" },
8891 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8892 8, 0, "tx_1024_to_1522_byte_packets" },
8893 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8894 8, 0, "tx_1523_to_9022_byte_packets" },
8895/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8896 8, 0, "rx_xon_frames" },
8897 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8898 8, 0, "rx_xoff_frames" },
8899 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
8900 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
8901 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8902 8, 0, "rx_mac_ctrl_frames" },
8903 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
8904 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
8905 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
8906 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
8907/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
a2fbb9ea
ET
8908};
8909
8910static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8911{
bb2a0f7a
YG
8912 struct bnx2x *bp = netdev_priv(dev);
8913 int i, j;
8914
a2fbb9ea
ET
8915 switch (stringset) {
8916 case ETH_SS_STATS:
bb2a0f7a
YG
8917 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8918 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8919 continue;
8920 strcpy(buf + j*ETH_GSTRING_LEN,
8921 bnx2x_stats_arr[i].string);
8922 j++;
8923 }
a2fbb9ea
ET
8924 break;
8925
8926 case ETH_SS_TEST:
8927 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8928 break;
8929 }
8930}
8931
8932static int bnx2x_get_stats_count(struct net_device *dev)
8933{
bb2a0f7a
YG
8934 struct bnx2x *bp = netdev_priv(dev);
8935 int i, num_stats = 0;
8936
8937 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8938 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8939 continue;
8940 num_stats++;
8941 }
8942 return num_stats;
a2fbb9ea
ET
8943}
8944
8945static void bnx2x_get_ethtool_stats(struct net_device *dev,
8946 struct ethtool_stats *stats, u64 *buf)
8947{
8948 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
8949 u32 *hw_stats = (u32 *)&bp->eth_stats;
8950 int i, j;
a2fbb9ea 8951
bb2a0f7a
YG
8952 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8953 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
a2fbb9ea 8954 continue;
bb2a0f7a
YG
8955
8956 if (bnx2x_stats_arr[i].size == 0) {
8957 /* skip this counter */
8958 buf[j] = 0;
8959 j++;
a2fbb9ea
ET
8960 continue;
8961 }
bb2a0f7a 8962 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 8963 /* 4-byte counter */
bb2a0f7a
YG
8964 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
8965 j++;
a2fbb9ea
ET
8966 continue;
8967 }
8968 /* 8-byte counter */
bb2a0f7a
YG
8969 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
8970 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
8971 j++;
a2fbb9ea
ET
8972 }
8973}
8974
8975static int bnx2x_phys_id(struct net_device *dev, u32 data)
8976{
8977 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8978 int port = BP_PORT(bp);
a2fbb9ea
ET
8979 int i;
8980
34f80b04
EG
8981 if (!netif_running(dev))
8982 return 0;
8983
8984 if (!bp->port.pmf)
8985 return 0;
8986
a2fbb9ea
ET
8987 if (data == 0)
8988 data = 2;
8989
8990 for (i = 0; i < (data * 2); i++) {
c18487ee 8991 if ((i % 2) == 0)
34f80b04 8992 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
8993 bp->link_params.hw_led_mode,
8994 bp->link_params.chip_id);
8995 else
34f80b04 8996 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
8997 bp->link_params.hw_led_mode,
8998 bp->link_params.chip_id);
8999
a2fbb9ea
ET
9000 msleep_interruptible(500);
9001 if (signal_pending(current))
9002 break;
9003 }
9004
c18487ee 9005 if (bp->link_vars.link_up)
34f80b04 9006 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9007 bp->link_vars.line_speed,
9008 bp->link_params.hw_led_mode,
9009 bp->link_params.chip_id);
a2fbb9ea
ET
9010
9011 return 0;
9012}
9013
9014static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9015 .get_settings = bnx2x_get_settings,
9016 .set_settings = bnx2x_set_settings,
9017 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9018 .get_wol = bnx2x_get_wol,
9019 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9020 .get_msglevel = bnx2x_get_msglevel,
9021 .set_msglevel = bnx2x_set_msglevel,
9022 .nway_reset = bnx2x_nway_reset,
9023 .get_link = ethtool_op_get_link,
9024 .get_eeprom_len = bnx2x_get_eeprom_len,
9025 .get_eeprom = bnx2x_get_eeprom,
9026 .set_eeprom = bnx2x_set_eeprom,
9027 .get_coalesce = bnx2x_get_coalesce,
9028 .set_coalesce = bnx2x_set_coalesce,
9029 .get_ringparam = bnx2x_get_ringparam,
9030 .set_ringparam = bnx2x_set_ringparam,
9031 .get_pauseparam = bnx2x_get_pauseparam,
9032 .set_pauseparam = bnx2x_set_pauseparam,
9033 .get_rx_csum = bnx2x_get_rx_csum,
9034 .set_rx_csum = bnx2x_set_rx_csum,
9035 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9036 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9037 .set_flags = bnx2x_set_flags,
9038 .get_flags = ethtool_op_get_flags,
9039 .get_sg = ethtool_op_get_sg,
9040 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9041 .get_tso = ethtool_op_get_tso,
9042 .set_tso = bnx2x_set_tso,
9043 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9044 .self_test = bnx2x_self_test,
9045 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9046 .phys_id = bnx2x_phys_id,
9047 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9048 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9049};
9050
9051/* end of ethtool_ops */
9052
9053/****************************************************************************
9054* General service functions
9055****************************************************************************/
9056
9057static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9058{
9059 u16 pmcsr;
9060
9061 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9062
9063 switch (state) {
9064 case PCI_D0:
34f80b04 9065 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9066 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9067 PCI_PM_CTRL_PME_STATUS));
9068
9069 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9070 /* delay required during transition out of D3hot */
9071 msleep(20);
34f80b04 9072 break;
a2fbb9ea 9073
34f80b04
EG
9074 case PCI_D3hot:
9075 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9076 pmcsr |= 3;
a2fbb9ea 9077
34f80b04
EG
9078 if (bp->wol)
9079 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9080
34f80b04
EG
9081 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9082 pmcsr);
a2fbb9ea 9083
34f80b04
EG
9084 /* No more memory access after this point until
9085 * device is brought back to D0.
9086 */
9087 break;
9088
9089 default:
9090 return -EINVAL;
9091 }
9092 return 0;
a2fbb9ea
ET
9093}
9094
34f80b04
EG
9095/*
9096 * net_device service functions
9097 */
9098
a2fbb9ea
ET
9099static int bnx2x_poll(struct napi_struct *napi, int budget)
9100{
9101 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9102 napi);
9103 struct bnx2x *bp = fp->bp;
9104 int work_done = 0;
9105
9106#ifdef BNX2X_STOP_ON_ERROR
9107 if (unlikely(bp->panic))
34f80b04 9108 goto poll_panic;
a2fbb9ea
ET
9109#endif
9110
9111 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9112 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9113 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9114
9115 bnx2x_update_fpsb_idx(fp);
9116
34f80b04
EG
9117 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9118 (fp->tx_pkt_prod != fp->tx_pkt_cons))
a2fbb9ea
ET
9119 bnx2x_tx_int(fp, budget);
9120
a2fbb9ea
ET
9121 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9122 work_done = bnx2x_rx_int(fp, budget);
9123
a2fbb9ea
ET
9124 rmb(); /* bnx2x_has_work() reads the status block */
9125
9126 /* must not complete if we consumed full budget */
9127 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9128
9129#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9130poll_panic:
a2fbb9ea
ET
9131#endif
9132 netif_rx_complete(bp->dev, napi);
9133
34f80b04 9134 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9135 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9136 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9137 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9138 }
a2fbb9ea
ET
9139 return work_done;
9140}
9141
755735eb
EG
9142
9143/* we split the first BD into headers and data BDs
9144 * to ease the pain of our fellow micocode engineers
9145 * we use one mapping for both BDs
9146 * So far this has only been observed to happen
9147 * in Other Operating Systems(TM)
9148 */
9149static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9150 struct bnx2x_fastpath *fp,
9151 struct eth_tx_bd **tx_bd, u16 hlen,
9152 u16 bd_prod, int nbd)
9153{
9154 struct eth_tx_bd *h_tx_bd = *tx_bd;
9155 struct eth_tx_bd *d_tx_bd;
9156 dma_addr_t mapping;
9157 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9158
9159 /* first fix first BD */
9160 h_tx_bd->nbd = cpu_to_le16(nbd);
9161 h_tx_bd->nbytes = cpu_to_le16(hlen);
9162
9163 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9164 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9165 h_tx_bd->addr_lo, h_tx_bd->nbd);
9166
9167 /* now get a new data BD
9168 * (after the pbd) and fill it */
9169 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9170 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9171
9172 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9173 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9174
9175 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9176 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9177 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9178 d_tx_bd->vlan = 0;
9179 /* this marks the BD as one that has no individual mapping
9180 * the FW ignores this flag in a BD not marked start
9181 */
9182 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9183 DP(NETIF_MSG_TX_QUEUED,
9184 "TSO split data size is %d (%x:%x)\n",
9185 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9186
9187 /* update tx_bd for marking the last BD flag */
9188 *tx_bd = d_tx_bd;
9189
9190 return bd_prod;
9191}
9192
9193static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9194{
9195 if (fix > 0)
9196 csum = (u16) ~csum_fold(csum_sub(csum,
9197 csum_partial(t_header - fix, fix, 0)));
9198
9199 else if (fix < 0)
9200 csum = (u16) ~csum_fold(csum_add(csum,
9201 csum_partial(t_header, -fix, 0)));
9202
9203 return swab16(csum);
9204}
9205
9206static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9207{
9208 u32 rc;
9209
9210 if (skb->ip_summed != CHECKSUM_PARTIAL)
9211 rc = XMIT_PLAIN;
9212
9213 else {
9214 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9215 rc = XMIT_CSUM_V6;
9216 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9217 rc |= XMIT_CSUM_TCP;
9218
9219 } else {
9220 rc = XMIT_CSUM_V4;
9221 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9222 rc |= XMIT_CSUM_TCP;
9223 }
9224 }
9225
9226 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9227 rc |= XMIT_GSO_V4;
9228
9229 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9230 rc |= XMIT_GSO_V6;
9231
9232 return rc;
9233}
9234
9235/* check if packet requires linearization (packet is too fragmented) */
9236static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9237 u32 xmit_type)
9238{
9239 int to_copy = 0;
9240 int hlen = 0;
9241 int first_bd_sz = 0;
9242
9243 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9244 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9245
9246 if (xmit_type & XMIT_GSO) {
9247 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9248 /* Check if LSO packet needs to be copied:
9249 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9250 int wnd_size = MAX_FETCH_BD - 3;
9251 /* Number of widnows to check */
9252 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9253 int wnd_idx = 0;
9254 int frag_idx = 0;
9255 u32 wnd_sum = 0;
9256
9257 /* Headers length */
9258 hlen = (int)(skb_transport_header(skb) - skb->data) +
9259 tcp_hdrlen(skb);
9260
9261 /* Amount of data (w/o headers) on linear part of SKB*/
9262 first_bd_sz = skb_headlen(skb) - hlen;
9263
9264 wnd_sum = first_bd_sz;
9265
9266 /* Calculate the first sum - it's special */
9267 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9268 wnd_sum +=
9269 skb_shinfo(skb)->frags[frag_idx].size;
9270
9271 /* If there was data on linear skb data - check it */
9272 if (first_bd_sz > 0) {
9273 if (unlikely(wnd_sum < lso_mss)) {
9274 to_copy = 1;
9275 goto exit_lbl;
9276 }
9277
9278 wnd_sum -= first_bd_sz;
9279 }
9280
9281 /* Others are easier: run through the frag list and
9282 check all windows */
9283 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9284 wnd_sum +=
9285 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9286
9287 if (unlikely(wnd_sum < lso_mss)) {
9288 to_copy = 1;
9289 break;
9290 }
9291 wnd_sum -=
9292 skb_shinfo(skb)->frags[wnd_idx].size;
9293 }
9294
9295 } else {
9296 /* in non-LSO too fragmented packet should always
9297 be linearized */
9298 to_copy = 1;
9299 }
9300 }
9301
9302exit_lbl:
9303 if (unlikely(to_copy))
9304 DP(NETIF_MSG_TX_QUEUED,
9305 "Linearization IS REQUIRED for %s packet. "
9306 "num_frags %d hlen %d first_bd_sz %d\n",
9307 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9308 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9309
9310 return to_copy;
9311}
9312
9313/* called with netif_tx_lock
a2fbb9ea 9314 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9315 * netif_wake_queue()
a2fbb9ea
ET
9316 */
9317static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9318{
9319 struct bnx2x *bp = netdev_priv(dev);
9320 struct bnx2x_fastpath *fp;
9321 struct sw_tx_bd *tx_buf;
9322 struct eth_tx_bd *tx_bd;
9323 struct eth_tx_parse_bd *pbd = NULL;
9324 u16 pkt_prod, bd_prod;
755735eb 9325 int nbd, fp_index;
a2fbb9ea 9326 dma_addr_t mapping;
755735eb
EG
9327 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9328 int vlan_off = (bp->e1hov ? 4 : 0);
9329 int i;
9330 u8 hlen = 0;
a2fbb9ea
ET
9331
9332#ifdef BNX2X_STOP_ON_ERROR
9333 if (unlikely(bp->panic))
9334 return NETDEV_TX_BUSY;
9335#endif
9336
755735eb 9337 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9338 fp = &bp->fp[fp_index];
755735eb 9339
a2fbb9ea
ET
9340 if (unlikely(bnx2x_tx_avail(bp->fp) <
9341 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9342 bp->eth_stats.driver_xoff++,
a2fbb9ea
ET
9343 netif_stop_queue(dev);
9344 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9345 return NETDEV_TX_BUSY;
9346 }
9347
755735eb
EG
9348 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9349 " gso type %x xmit_type %x\n",
9350 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9351 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9352
9353 /* First, check if we need to linearaize the skb
9354 (due to FW restrictions) */
9355 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9356 /* Statistics of linearization */
9357 bp->lin_cnt++;
9358 if (skb_linearize(skb) != 0) {
9359 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9360 "silently dropping this SKB\n");
9361 dev_kfree_skb_any(skb);
9362 return 0;
9363 }
9364 }
9365
a2fbb9ea 9366 /*
755735eb 9367 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9368 then for TSO or xsum we have a parsing info BD,
755735eb 9369 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9370 (don't forget to mark the last one as last,
9371 and to unmap only AFTER you write to the BD ...)
755735eb 9372 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9373 */
9374
9375 pkt_prod = fp->tx_pkt_prod++;
755735eb 9376 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9377
755735eb 9378 /* get a tx_buf and first BD */
a2fbb9ea
ET
9379 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9380 tx_bd = &fp->tx_desc_ring[bd_prod];
9381
9382 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9383 tx_bd->general_data = (UNICAST_ADDRESS <<
9384 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9385 tx_bd->general_data |= 1; /* header nbd */
9386
755735eb
EG
9387 /* remember the first BD of the packet */
9388 tx_buf->first_bd = fp->tx_bd_prod;
9389 tx_buf->skb = skb;
a2fbb9ea
ET
9390
9391 DP(NETIF_MSG_TX_QUEUED,
9392 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9393 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9394
755735eb
EG
9395 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9396 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9397 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9398 vlan_off += 4;
9399 } else
9400 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9401
755735eb 9402 if (xmit_type) {
a2fbb9ea 9403
755735eb 9404 /* turn on parsing and get a BD */
a2fbb9ea
ET
9405 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9406 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9407
9408 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9409 }
9410
9411 if (xmit_type & XMIT_CSUM) {
9412 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9413
9414 /* for now NS flag is not used in Linux */
755735eb 9415 pbd->global_data = (hlen |
96fc1784 9416 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9417 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9418
755735eb
EG
9419 pbd->ip_hlen = (skb_transport_header(skb) -
9420 skb_network_header(skb)) / 2;
9421
9422 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9423
755735eb
EG
9424 pbd->total_hlen = cpu_to_le16(hlen);
9425 hlen = hlen*2 - vlan_off;
a2fbb9ea 9426
755735eb
EG
9427 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9428
9429 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9430 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9431 ETH_TX_BD_FLAGS_IP_CSUM;
9432 else
9433 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9434
9435 if (xmit_type & XMIT_CSUM_TCP) {
9436 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9437
9438 } else {
9439 s8 fix = SKB_CS_OFF(skb); /* signed! */
9440
a2fbb9ea 9441 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9442 pbd->cs_offset = fix / 2;
a2fbb9ea 9443
755735eb
EG
9444 DP(NETIF_MSG_TX_QUEUED,
9445 "hlen %d offset %d fix %d csum before fix %x\n",
9446 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9447 SKB_CS(skb));
9448
9449 /* HW bug: fixup the CSUM */
9450 pbd->tcp_pseudo_csum =
9451 bnx2x_csum_fix(skb_transport_header(skb),
9452 SKB_CS(skb), fix);
9453
9454 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9455 pbd->tcp_pseudo_csum);
9456 }
a2fbb9ea
ET
9457 }
9458
9459 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9460 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9461
9462 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9463 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9464 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9465 tx_bd->nbd = cpu_to_le16(nbd);
9466 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9467
9468 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9469 " nbytes %d flags %x vlan %x\n",
9470 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9471 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9472 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9473
755735eb 9474 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9475
9476 DP(NETIF_MSG_TX_QUEUED,
9477 "TSO packet len %d hlen %d total len %d tso size %d\n",
9478 skb->len, hlen, skb_headlen(skb),
9479 skb_shinfo(skb)->gso_size);
9480
9481 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9482
755735eb
EG
9483 if (unlikely(skb_headlen(skb) > hlen))
9484 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9485 bd_prod, ++nbd);
a2fbb9ea
ET
9486
9487 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9488 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9489 pbd->tcp_flags = pbd_tcp_flags(skb);
9490
9491 if (xmit_type & XMIT_GSO_V4) {
9492 pbd->ip_id = swab16(ip_hdr(skb)->id);
9493 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9494 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9495 ip_hdr(skb)->daddr,
9496 0, IPPROTO_TCP, 0));
755735eb
EG
9497
9498 } else
9499 pbd->tcp_pseudo_csum =
9500 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9501 &ipv6_hdr(skb)->daddr,
9502 0, IPPROTO_TCP, 0));
9503
a2fbb9ea
ET
9504 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9505 }
9506
755735eb
EG
9507 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9508 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9509
755735eb
EG
9510 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9511 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9512
755735eb
EG
9513 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9514 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9515
755735eb
EG
9516 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9517 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9518 tx_bd->nbytes = cpu_to_le16(frag->size);
9519 tx_bd->vlan = cpu_to_le16(pkt_prod);
9520 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9521
755735eb
EG
9522 DP(NETIF_MSG_TX_QUEUED,
9523 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9524 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9525 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9526 }
9527
755735eb 9528 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9529 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9530
9531 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9532 tx_bd, tx_bd->bd_flags.as_bitfield);
9533
a2fbb9ea
ET
9534 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9535
755735eb 9536 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9537 * if the packet contains or ends with it
9538 */
9539 if (TX_BD_POFF(bd_prod) < nbd)
9540 nbd++;
9541
9542 if (pbd)
9543 DP(NETIF_MSG_TX_QUEUED,
9544 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9545 " tcp_flags %x xsum %x seq %u hlen %u\n",
9546 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9547 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9548 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9549
755735eb 9550 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9551
96fc1784
ET
9552 fp->hw_tx_prods->bds_prod =
9553 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9554 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9555 fp->hw_tx_prods->packets_prod =
9556 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9557 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9558
9559 mmiowb();
9560
755735eb 9561 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9562 dev->trans_start = jiffies;
9563
9564 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9565 netif_stop_queue(dev);
bb2a0f7a 9566 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9567 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9568 netif_wake_queue(dev);
9569 }
9570 fp->tx_pkt++;
9571
9572 return NETDEV_TX_OK;
9573}
9574
bb2a0f7a 9575/* called with rtnl_lock */
a2fbb9ea
ET
9576static int bnx2x_open(struct net_device *dev)
9577{
9578 struct bnx2x *bp = netdev_priv(dev);
9579
9580 bnx2x_set_power_state(bp, PCI_D0);
9581
bb2a0f7a 9582 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9583}
9584
bb2a0f7a 9585/* called with rtnl_lock */
a2fbb9ea
ET
9586static int bnx2x_close(struct net_device *dev)
9587{
a2fbb9ea
ET
9588 struct bnx2x *bp = netdev_priv(dev);
9589
9590 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9591 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9592 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9593 if (!CHIP_REV_IS_SLOW(bp))
9594 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9595
9596 return 0;
9597}
9598
34f80b04
EG
9599/* called with netif_tx_lock from set_multicast */
9600static void bnx2x_set_rx_mode(struct net_device *dev)
9601{
9602 struct bnx2x *bp = netdev_priv(dev);
9603 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9604 int port = BP_PORT(bp);
9605
9606 if (bp->state != BNX2X_STATE_OPEN) {
9607 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9608 return;
9609 }
9610
9611 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9612
9613 if (dev->flags & IFF_PROMISC)
9614 rx_mode = BNX2X_RX_MODE_PROMISC;
9615
9616 else if ((dev->flags & IFF_ALLMULTI) ||
9617 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9618 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9619
9620 else { /* some multicasts */
9621 if (CHIP_IS_E1(bp)) {
9622 int i, old, offset;
9623 struct dev_mc_list *mclist;
9624 struct mac_configuration_cmd *config =
9625 bnx2x_sp(bp, mcast_config);
9626
9627 for (i = 0, mclist = dev->mc_list;
9628 mclist && (i < dev->mc_count);
9629 i++, mclist = mclist->next) {
9630
9631 config->config_table[i].
9632 cam_entry.msb_mac_addr =
9633 swab16(*(u16 *)&mclist->dmi_addr[0]);
9634 config->config_table[i].
9635 cam_entry.middle_mac_addr =
9636 swab16(*(u16 *)&mclist->dmi_addr[2]);
9637 config->config_table[i].
9638 cam_entry.lsb_mac_addr =
9639 swab16(*(u16 *)&mclist->dmi_addr[4]);
9640 config->config_table[i].cam_entry.flags =
9641 cpu_to_le16(port);
9642 config->config_table[i].
9643 target_table_entry.flags = 0;
9644 config->config_table[i].
9645 target_table_entry.client_id = 0;
9646 config->config_table[i].
9647 target_table_entry.vlan_id = 0;
9648
9649 DP(NETIF_MSG_IFUP,
9650 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9651 config->config_table[i].
9652 cam_entry.msb_mac_addr,
9653 config->config_table[i].
9654 cam_entry.middle_mac_addr,
9655 config->config_table[i].
9656 cam_entry.lsb_mac_addr);
9657 }
9658 old = config->hdr.length_6b;
9659 if (old > i) {
9660 for (; i < old; i++) {
9661 if (CAM_IS_INVALID(config->
9662 config_table[i])) {
9663 i--; /* already invalidated */
9664 break;
9665 }
9666 /* invalidate */
9667 CAM_INVALIDATE(config->
9668 config_table[i]);
9669 }
9670 }
9671
9672 if (CHIP_REV_IS_SLOW(bp))
9673 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9674 else
9675 offset = BNX2X_MAX_MULTICAST*(1 + port);
9676
9677 config->hdr.length_6b = i;
9678 config->hdr.offset = offset;
9679 config->hdr.client_id = BP_CL_ID(bp);
9680 config->hdr.reserved1 = 0;
9681
9682 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9683 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9684 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9685 0);
9686 } else { /* E1H */
9687 /* Accept one or more multicasts */
9688 struct dev_mc_list *mclist;
9689 u32 mc_filter[MC_HASH_SIZE];
9690 u32 crc, bit, regidx;
9691 int i;
9692
9693 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9694
9695 for (i = 0, mclist = dev->mc_list;
9696 mclist && (i < dev->mc_count);
9697 i++, mclist = mclist->next) {
9698
9699 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9700 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9701 mclist->dmi_addr[0], mclist->dmi_addr[1],
9702 mclist->dmi_addr[2], mclist->dmi_addr[3],
9703 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9704
9705 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9706 bit = (crc >> 24) & 0xff;
9707 regidx = bit >> 5;
9708 bit &= 0x1f;
9709 mc_filter[regidx] |= (1 << bit);
9710 }
9711
9712 for (i = 0; i < MC_HASH_SIZE; i++)
9713 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9714 mc_filter[i]);
9715 }
9716 }
9717
9718 bp->rx_mode = rx_mode;
9719 bnx2x_set_storm_rx_mode(bp);
9720}
9721
9722/* called with rtnl_lock */
a2fbb9ea
ET
9723static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9724{
9725 struct sockaddr *addr = p;
9726 struct bnx2x *bp = netdev_priv(dev);
9727
34f80b04 9728 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9729 return -EINVAL;
9730
9731 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9732 if (netif_running(dev)) {
9733 if (CHIP_IS_E1(bp))
9734 bnx2x_set_mac_addr_e1(bp);
9735 else
9736 bnx2x_set_mac_addr_e1h(bp);
9737 }
a2fbb9ea
ET
9738
9739 return 0;
9740}
9741
c18487ee 9742/* called with rtnl_lock */
a2fbb9ea
ET
9743static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9744{
9745 struct mii_ioctl_data *data = if_mii(ifr);
9746 struct bnx2x *bp = netdev_priv(dev);
9747 int err;
9748
9749 switch (cmd) {
9750 case SIOCGMIIPHY:
34f80b04 9751 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9752
c14423fe 9753 /* fallthrough */
c18487ee 9754
a2fbb9ea 9755 case SIOCGMIIREG: {
c18487ee 9756 u16 mii_regval;
a2fbb9ea 9757
c18487ee
YR
9758 if (!netif_running(dev))
9759 return -EAGAIN;
a2fbb9ea 9760
34f80b04
EG
9761 mutex_lock(&bp->port.phy_mutex);
9762 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9763 DEFAULT_PHY_DEV_ADDR,
9764 (data->reg_num & 0x1f), &mii_regval);
9765 data->val_out = mii_regval;
34f80b04 9766 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9767 return err;
9768 }
9769
9770 case SIOCSMIIREG:
9771 if (!capable(CAP_NET_ADMIN))
9772 return -EPERM;
9773
c18487ee
YR
9774 if (!netif_running(dev))
9775 return -EAGAIN;
9776
34f80b04
EG
9777 mutex_lock(&bp->port.phy_mutex);
9778 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9779 DEFAULT_PHY_DEV_ADDR,
9780 (data->reg_num & 0x1f), data->val_in);
34f80b04 9781 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9782 return err;
9783
9784 default:
9785 /* do nothing */
9786 break;
9787 }
9788
9789 return -EOPNOTSUPP;
9790}
9791
34f80b04 9792/* called with rtnl_lock */
a2fbb9ea
ET
9793static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9794{
9795 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9796 int rc = 0;
a2fbb9ea
ET
9797
9798 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9799 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9800 return -EINVAL;
9801
9802 /* This does not race with packet allocation
c14423fe 9803 * because the actual alloc size is
a2fbb9ea
ET
9804 * only updated as part of load
9805 */
9806 dev->mtu = new_mtu;
9807
9808 if (netif_running(dev)) {
34f80b04
EG
9809 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9810 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9811 }
34f80b04
EG
9812
9813 return rc;
a2fbb9ea
ET
9814}
9815
9816static void bnx2x_tx_timeout(struct net_device *dev)
9817{
9818 struct bnx2x *bp = netdev_priv(dev);
9819
9820#ifdef BNX2X_STOP_ON_ERROR
9821 if (!bp->panic)
9822 bnx2x_panic();
9823#endif
9824 /* This allows the netif to be shutdown gracefully before resetting */
9825 schedule_work(&bp->reset_task);
9826}
9827
9828#ifdef BCM_VLAN
34f80b04 9829/* called with rtnl_lock */
a2fbb9ea
ET
9830static void bnx2x_vlan_rx_register(struct net_device *dev,
9831 struct vlan_group *vlgrp)
9832{
9833 struct bnx2x *bp = netdev_priv(dev);
9834
9835 bp->vlgrp = vlgrp;
9836 if (netif_running(dev))
49d66772 9837 bnx2x_set_client_config(bp);
a2fbb9ea 9838}
34f80b04 9839
a2fbb9ea
ET
9840#endif
9841
9842#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9843static void poll_bnx2x(struct net_device *dev)
9844{
9845 struct bnx2x *bp = netdev_priv(dev);
9846
9847 disable_irq(bp->pdev->irq);
9848 bnx2x_interrupt(bp->pdev->irq, dev);
9849 enable_irq(bp->pdev->irq);
9850}
9851#endif
9852
34f80b04
EG
9853static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9854 struct net_device *dev)
a2fbb9ea
ET
9855{
9856 struct bnx2x *bp;
9857 int rc;
9858
9859 SET_NETDEV_DEV(dev, &pdev->dev);
9860 bp = netdev_priv(dev);
9861
34f80b04
EG
9862 bp->dev = dev;
9863 bp->pdev = pdev;
a2fbb9ea 9864 bp->flags = 0;
34f80b04 9865 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9866
9867 rc = pci_enable_device(pdev);
9868 if (rc) {
9869 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9870 goto err_out;
9871 }
9872
9873 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9874 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9875 " aborting\n");
9876 rc = -ENODEV;
9877 goto err_out_disable;
9878 }
9879
9880 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9881 printk(KERN_ERR PFX "Cannot find second PCI device"
9882 " base address, aborting\n");
9883 rc = -ENODEV;
9884 goto err_out_disable;
9885 }
9886
34f80b04
EG
9887 if (atomic_read(&pdev->enable_cnt) == 1) {
9888 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9889 if (rc) {
9890 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9891 " aborting\n");
9892 goto err_out_disable;
9893 }
a2fbb9ea 9894
34f80b04
EG
9895 pci_set_master(pdev);
9896 pci_save_state(pdev);
9897 }
a2fbb9ea
ET
9898
9899 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9900 if (bp->pm_cap == 0) {
9901 printk(KERN_ERR PFX "Cannot find power management"
9902 " capability, aborting\n");
9903 rc = -EIO;
9904 goto err_out_release;
9905 }
9906
9907 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9908 if (bp->pcie_cap == 0) {
9909 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9910 " aborting\n");
9911 rc = -EIO;
9912 goto err_out_release;
9913 }
9914
9915 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9916 bp->flags |= USING_DAC_FLAG;
9917 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9918 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9919 " failed, aborting\n");
9920 rc = -EIO;
9921 goto err_out_release;
9922 }
9923
9924 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9925 printk(KERN_ERR PFX "System does not support DMA,"
9926 " aborting\n");
9927 rc = -EIO;
9928 goto err_out_release;
9929 }
9930
34f80b04
EG
9931 dev->mem_start = pci_resource_start(pdev, 0);
9932 dev->base_addr = dev->mem_start;
9933 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9934
9935 dev->irq = pdev->irq;
9936
9937 bp->regview = ioremap_nocache(dev->base_addr,
9938 pci_resource_len(pdev, 0));
9939 if (!bp->regview) {
9940 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9941 rc = -ENOMEM;
9942 goto err_out_release;
9943 }
9944
34f80b04
EG
9945 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9946 min_t(u64, BNX2X_DB_SIZE,
9947 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
9948 if (!bp->doorbells) {
9949 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9950 rc = -ENOMEM;
9951 goto err_out_unmap;
9952 }
9953
9954 bnx2x_set_power_state(bp, PCI_D0);
9955
34f80b04
EG
9956 /* clean indirect addresses */
9957 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9958 PCICFG_VENDOR_ID_OFFSET);
9959 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9960 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9961 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9962 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9963
34f80b04
EG
9964 dev->hard_start_xmit = bnx2x_start_xmit;
9965 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9966
34f80b04
EG
9967 dev->ethtool_ops = &bnx2x_ethtool_ops;
9968 dev->open = bnx2x_open;
9969 dev->stop = bnx2x_close;
9970 dev->set_multicast_list = bnx2x_set_rx_mode;
9971 dev->set_mac_address = bnx2x_change_mac_addr;
9972 dev->do_ioctl = bnx2x_ioctl;
9973 dev->change_mtu = bnx2x_change_mtu;
9974 dev->tx_timeout = bnx2x_tx_timeout;
9975#ifdef BCM_VLAN
9976 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9977#endif
9978#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9979 dev->poll_controller = poll_bnx2x;
9980#endif
9981 dev->features |= NETIF_F_SG;
9982 dev->features |= NETIF_F_HW_CSUM;
9983 if (bp->flags & USING_DAC_FLAG)
9984 dev->features |= NETIF_F_HIGHDMA;
9985#ifdef BCM_VLAN
9986 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9987#endif
9988 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 9989 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
9990
9991 return 0;
9992
9993err_out_unmap:
9994 if (bp->regview) {
9995 iounmap(bp->regview);
9996 bp->regview = NULL;
9997 }
a2fbb9ea
ET
9998 if (bp->doorbells) {
9999 iounmap(bp->doorbells);
10000 bp->doorbells = NULL;
10001 }
10002
10003err_out_release:
34f80b04
EG
10004 if (atomic_read(&pdev->enable_cnt) == 1)
10005 pci_release_regions(pdev);
a2fbb9ea
ET
10006
10007err_out_disable:
10008 pci_disable_device(pdev);
10009 pci_set_drvdata(pdev, NULL);
10010
10011err_out:
10012 return rc;
10013}
10014
25047950
ET
10015static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10016{
10017 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10018
10019 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10020 return val;
10021}
10022
10023/* return value of 1=2.5GHz 2=5GHz */
10024static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10025{
10026 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10027
10028 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10029 return val;
10030}
10031
a2fbb9ea
ET
10032static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10033 const struct pci_device_id *ent)
10034{
10035 static int version_printed;
10036 struct net_device *dev = NULL;
10037 struct bnx2x *bp;
25047950 10038 int rc;
25047950 10039 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10040
10041 if (version_printed++ == 0)
10042 printk(KERN_INFO "%s", version);
10043
10044 /* dev zeroed in init_etherdev */
10045 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10046 if (!dev) {
10047 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10048 return -ENOMEM;
34f80b04 10049 }
a2fbb9ea
ET
10050
10051 netif_carrier_off(dev);
10052
10053 bp = netdev_priv(dev);
10054 bp->msglevel = debug;
10055
34f80b04 10056 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10057 if (rc < 0) {
10058 free_netdev(dev);
10059 return rc;
10060 }
10061
a2fbb9ea
ET
10062 rc = register_netdev(dev);
10063 if (rc) {
c14423fe 10064 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10065 goto init_one_exit;
a2fbb9ea
ET
10066 }
10067
10068 pci_set_drvdata(pdev, dev);
10069
34f80b04
EG
10070 rc = bnx2x_init_bp(bp);
10071 if (rc) {
10072 unregister_netdev(dev);
10073 goto init_one_exit;
10074 }
10075
10076 bp->common.name = board_info[ent->driver_data].name;
25047950 10077 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10078 " IRQ %d, ", dev->name, bp->common.name,
10079 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10080 bnx2x_get_pcie_width(bp),
10081 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10082 dev->base_addr, bp->pdev->irq);
10083 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10084 return 0;
34f80b04
EG
10085
10086init_one_exit:
10087 if (bp->regview)
10088 iounmap(bp->regview);
10089
10090 if (bp->doorbells)
10091 iounmap(bp->doorbells);
10092
10093 free_netdev(dev);
10094
10095 if (atomic_read(&pdev->enable_cnt) == 1)
10096 pci_release_regions(pdev);
10097
10098 pci_disable_device(pdev);
10099 pci_set_drvdata(pdev, NULL);
10100
10101 return rc;
a2fbb9ea
ET
10102}
10103
10104static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10105{
10106 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10107 struct bnx2x *bp;
10108
10109 if (!dev) {
228241eb
ET
10110 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10111 return;
10112 }
228241eb 10113 bp = netdev_priv(dev);
a2fbb9ea 10114
a2fbb9ea
ET
10115 unregister_netdev(dev);
10116
10117 if (bp->regview)
10118 iounmap(bp->regview);
10119
10120 if (bp->doorbells)
10121 iounmap(bp->doorbells);
10122
10123 free_netdev(dev);
34f80b04
EG
10124
10125 if (atomic_read(&pdev->enable_cnt) == 1)
10126 pci_release_regions(pdev);
10127
a2fbb9ea
ET
10128 pci_disable_device(pdev);
10129 pci_set_drvdata(pdev, NULL);
10130}
10131
10132static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10133{
10134 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10135 struct bnx2x *bp;
10136
34f80b04
EG
10137 if (!dev) {
10138 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10139 return -ENODEV;
10140 }
10141 bp = netdev_priv(dev);
a2fbb9ea 10142
34f80b04 10143 rtnl_lock();
a2fbb9ea 10144
34f80b04 10145 pci_save_state(pdev);
228241eb 10146
34f80b04
EG
10147 if (!netif_running(dev)) {
10148 rtnl_unlock();
10149 return 0;
10150 }
a2fbb9ea
ET
10151
10152 netif_device_detach(dev);
a2fbb9ea 10153
34f80b04
EG
10154 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10155
a2fbb9ea 10156 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10157
34f80b04
EG
10158 rtnl_unlock();
10159
a2fbb9ea
ET
10160 return 0;
10161}
10162
10163static int bnx2x_resume(struct pci_dev *pdev)
10164{
10165 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10166 struct bnx2x *bp;
a2fbb9ea
ET
10167 int rc;
10168
228241eb
ET
10169 if (!dev) {
10170 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10171 return -ENODEV;
10172 }
228241eb 10173 bp = netdev_priv(dev);
a2fbb9ea 10174
34f80b04
EG
10175 rtnl_lock();
10176
228241eb 10177 pci_restore_state(pdev);
34f80b04
EG
10178
10179 if (!netif_running(dev)) {
10180 rtnl_unlock();
10181 return 0;
10182 }
10183
a2fbb9ea
ET
10184 bnx2x_set_power_state(bp, PCI_D0);
10185 netif_device_attach(dev);
10186
34f80b04 10187 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10188
34f80b04
EG
10189 rtnl_unlock();
10190
10191 return rc;
a2fbb9ea
ET
10192}
10193
493adb1f
WX
10194/**
10195 * bnx2x_io_error_detected - called when PCI error is detected
10196 * @pdev: Pointer to PCI device
10197 * @state: The current pci connection state
10198 *
10199 * This function is called after a PCI bus error affecting
10200 * this device has been detected.
10201 */
10202static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10203 pci_channel_state_t state)
10204{
10205 struct net_device *dev = pci_get_drvdata(pdev);
10206 struct bnx2x *bp = netdev_priv(dev);
10207
10208 rtnl_lock();
10209
10210 netif_device_detach(dev);
10211
10212 if (netif_running(dev))
10213 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10214
10215 pci_disable_device(pdev);
10216
10217 rtnl_unlock();
10218
10219 /* Request a slot reset */
10220 return PCI_ERS_RESULT_NEED_RESET;
10221}
10222
10223/**
10224 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10225 * @pdev: Pointer to PCI device
10226 *
10227 * Restart the card from scratch, as if from a cold-boot.
10228 */
10229static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10230{
10231 struct net_device *dev = pci_get_drvdata(pdev);
10232 struct bnx2x *bp = netdev_priv(dev);
10233
10234 rtnl_lock();
10235
10236 if (pci_enable_device(pdev)) {
10237 dev_err(&pdev->dev,
10238 "Cannot re-enable PCI device after reset\n");
10239 rtnl_unlock();
10240 return PCI_ERS_RESULT_DISCONNECT;
10241 }
10242
10243 pci_set_master(pdev);
10244 pci_restore_state(pdev);
10245
10246 if (netif_running(dev))
10247 bnx2x_set_power_state(bp, PCI_D0);
10248
10249 rtnl_unlock();
10250
10251 return PCI_ERS_RESULT_RECOVERED;
10252}
10253
10254/**
10255 * bnx2x_io_resume - called when traffic can start flowing again
10256 * @pdev: Pointer to PCI device
10257 *
10258 * This callback is called when the error recovery driver tells us that
10259 * its OK to resume normal operation.
10260 */
10261static void bnx2x_io_resume(struct pci_dev *pdev)
10262{
10263 struct net_device *dev = pci_get_drvdata(pdev);
10264 struct bnx2x *bp = netdev_priv(dev);
10265
10266 rtnl_lock();
10267
10268 if (netif_running(dev))
10269 bnx2x_nic_load(bp, LOAD_OPEN);
10270
10271 netif_device_attach(dev);
10272
10273 rtnl_unlock();
10274}
10275
10276static struct pci_error_handlers bnx2x_err_handler = {
10277 .error_detected = bnx2x_io_error_detected,
10278 .slot_reset = bnx2x_io_slot_reset,
10279 .resume = bnx2x_io_resume,
10280};
10281
a2fbb9ea 10282static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10283 .name = DRV_MODULE_NAME,
10284 .id_table = bnx2x_pci_tbl,
10285 .probe = bnx2x_init_one,
10286 .remove = __devexit_p(bnx2x_remove_one),
10287 .suspend = bnx2x_suspend,
10288 .resume = bnx2x_resume,
10289 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10290};
10291
10292static int __init bnx2x_init(void)
10293{
10294 return pci_register_driver(&bnx2x_pci_driver);
10295}
10296
10297static void __exit bnx2x_cleanup(void)
10298{
10299 pci_unregister_driver(&bnx2x_pci_driver);
10300}
10301
10302module_init(bnx2x_init);
10303module_exit(bnx2x_cleanup);
10304