/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
 #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
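
/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * tunnel GRC register accesses through PCI config space, which is the
 * only path available before DMAE is ready.  A read-modify-write of a
 * device register would look like this; the GRC offset is hypothetical.
 */
#if 0
static void bnx2x_example_ind_rmw(struct bnx2x *bp)
{
	u32 val;

	val = bnx2x_reg_rd_ind(bp, 0x1000 /* hypothetical GRC offset */);
	val |= 0x1;		/* set an arbitrary bit */
	bnx2x_reg_wr_ind(bp, 0x1000, val);
}
#endif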

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
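
/*
 * Illustrative sketch (not part of the driver): a DMAE round trip using
 * the pair above - copy one dword from the slowpath wb_data buffer into
 * GRC space and read it back.  The GRC offset is hypothetical; real
 * callers go through the REG_WR_DMAE/REG_RD_DMAE wrappers.
 */
#if 0
static void bnx2x_example_dmae_roundtrip(struct bnx2x *bp)
{
	u32 *data = bnx2x_sp(bp, wb_data[0]);

	data[0] = 0xdeadbeef;
	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
			 0x2000 /* hypothetical GRC offset */, 1);
	bnx2x_read_dmae(bp, 0x2000, 1);	/* result lands back in wb_data */
}
#endif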

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
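
/*
 * Illustrative sketch (not part of the driver): a fastpath handler acks
 * its status block twice - once to mask further interrupts before NAPI
 * polling, and once to re-enable them with the latest index, mirroring
 * the calls made elsewhere in this file.
 */
#if 0
static void bnx2x_example_sb_ack(struct bnx2x *bp, struct bnx2x_fastpath *fp)
{
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
	/* ... poll the rings ... */
	bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
		     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
}
#endif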

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
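
/*
 * Worked example (illustrative): the s16 arithmetic above stays correct
 * across the 16-bit wrap - prod = 10, cons = 65530 gives
 * SUB_S16(10, 65530) = 16 BDs in flight, plus the NUM_TX_RINGS
 * "next page" reserve.  A transmit path would use the result like this
 * (skb/fp/dev assumed in scope):
 */
#if 0
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3)))
		netif_stop_queue(dev);
#endif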

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
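
/*
 * Illustrative sketch (not part of the driver): at ring-init time the
 * SGE ring would be filled by calling the allocator above once per
 * slot, backing out with the range-free helper on the first failure.
 */
#if 0
static int bnx2x_example_fill_sge_ring(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (bnx2x_alloc_rx_sge(bp, fp, i) < 0) {
			bnx2x_free_rx_sge_range(bp, fp, i);
			return -ENOMEM;
		}
	}
	return 0;
}
#endif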

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
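
/*
 * Illustrative sketch (not part of the driver): on an error or
 * allocation failure the rx path recycles the same skb from the
 * consumer slot back to the producer slot, exactly as the rx loop
 * later in this file does at its reuse_rx label (variables assumed
 * in scope):
 */
#if 0
	if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
		bp->eth_stats.rx_err_discard_pkt++;
		bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
	}
#endif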

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in each page (set them to 0):
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
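
/*
 * Illustrative sketch (not part of the driver): assuming 64-bit mask
 * elements (RX_SGE_MASK_ELEM_SZ bits each), an SGE ring index maps to
 * its mask element and bit position as below; this is the layout the
 * two helpers above maintain.
 */
#if 0
	u16 elem = RX_SGE(idx) >> RX_SGE_MASK_ELEM_SHIFT;   /* which u64 */
	u16 bit = RX_SGE(idx) & (RX_SGE_MASK_ELEM_SZ - 1);  /* which bit */
#endif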

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
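
/*
 * Illustrative sketch (not part of the driver): a hardware resource is
 * bracketed by the acquire/release pair above; the GPIO/SPIO helpers
 * below do exactly this with real resource IDs.
 */
#if 0
	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
		/* ... touch the shared register ... */
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	}
#endif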

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
a2fbb9ea 1797
c18487ee
YR
1798int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1799{
1800 /* The GPIO should be swapped if swap register is set and active */
1801 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1802 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1803 int gpio_shift = gpio_num +
1804 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1805 u32 gpio_mask = (1 << gpio_shift);
1806 u32 gpio_reg;
a2fbb9ea 1807
c18487ee
YR
1808 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1809 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1810 return -EINVAL;
1811 }
a2fbb9ea 1812
4a37fb66 1813 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1814 /* read GPIO and mask except the float bits */
1815 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1816
c18487ee
YR
1817 switch (mode) {
1818 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1819 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1820 gpio_num, gpio_shift);
1821 /* clear FLOAT and set CLR */
1822 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1823 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1824 break;
a2fbb9ea 1825
c18487ee
YR
1826 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1827 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1828 gpio_num, gpio_shift);
1829 /* clear FLOAT and set SET */
1830 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1832 break;
a2fbb9ea 1833
c18487ee
YR
1834 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1835 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1836 gpio_num, gpio_shift);
1837 /* set FLOAT */
1838 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1839 break;
a2fbb9ea 1840
c18487ee
YR
1841 default:
1842 break;
a2fbb9ea
ET
1843 }
1844
c18487ee 1845 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1847
c18487ee 1848 return 0;
a2fbb9ea
ET
1849}
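
/*
 * Note on the MISC_REG_GPIO layout relied on above (as implied by the
 * FLOAT/SET/CLR position macros): the register carries separate FLOAT,
 * SET and CLR bit groups per pin, so a pin is driven by clearing its
 * FLOAT bit and writing SET or CLR, and is tri-stated again by setting
 * FLOAT.  bnx2x_set_spio() below uses the same scheme for the SPIO pins.
 */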

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
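
/*
 * Worked example (illustrative only): if the four vNICs of a port are
 * configured with MIN_BW values of 0, 25, 50 and 25, the zero entry is
 * bumped to DEF_MIN_RATE and wsum is the sum of the four resulting rates;
 * only when every non-hidden MIN_BW is zero does the function return 0,
 * which bnx2x_init_port_minmax() below treats as "fairness disabled".
 */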

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
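
/*
 * Rough numbers, assuming the constants mean what their names suggest:
 * for a 10000 Mbps port, r_param = 10000/8 = 1250 bytes/usec, so the
 * fairness timer resolution is QM_ARB_BYTES/1250 usec and t_fair =
 * T_FAIR_COEF/10000 - consistent with the "for 10G it is 1000usec"
 * comment above.
 */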

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      ((double)m_rs_vn.
			       protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
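
/*
 * The SPQ is a single-producer ring: the producer BD and index wrap
 * together, spq_left acts as the credit counter, and the write to
 * XSTORM_SPQ_PROD_OFFSET above is the doorbell.  As the comment at the
 * top of bnx2x_sp_post() notes, completions are reported on the fastpath
 * ring, so no consumer handling is needed here.
 */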

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
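
/*
 * The returned value is a bitmask of which default status block indices
 * changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task() below tests bits 0
 * and 1 of this mask.
 */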

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
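
/*
 * The two derived masks above implement an edge detector: a bit newly set
 * in attn_bits that is neither acked nor recorded in attn_state is
 * "asserted"; a bit that dropped from attn_bits while still acked and
 * recorded is "deasserted".  The sanity check flags any bit where
 * attn_bits and attn_ack agree but disagree with the recorded state.
 */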

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
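
/*
 * Illustrative expansion: UPDATE_STAT64(rx_stat_grerb,
 * rx_stat_ifhcinbadoctets) diffs the freshly DMAE'd hardware counter
 * against the snapshot in mac_stx[0], stores the new value as the next
 * snapshot, and accumulates the delta into the running total in
 * mac_stx[1].  The (s_lo < a_lo) test in ADD_64 detects 32-bit
 * wrap-around of the low word and propagates the carry into the high
 * word.
 */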

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		msleep(1);
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
	}
	return 1;
}
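
/*
 * The completion word is written by the last DMAE command in the chain
 * (comp_val = DMAE_COMP_VAL), so this loop is effectively a barrier that
 * waits up to ~10 ms for the whole statistics chain to finish.  Note that
 * it returns 1 even on timeout - callers use it only for ordering.
 */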

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
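
/*
 * To summarize the start paths: bnx2x_stats_start() programs either the
 * PMF (port) DMAE chain or the per-function one, then kicks both the
 * hardware DMAE pass and the storm statistics ramrod.  The *_pmf_start
 * and *_restart variants differ only in whether the PMF base snapshot is
 * refreshed (bnx2x_stats_pmf_update) before restarting.
 */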
3439
3440static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3441{
3442 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444 struct regpair diff;
3445
3446 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3452 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3453 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458 UPDATE_STAT64(tx_stat_gt127,
3459 tx_stat_etherstatspkts65octetsto127octets);
3460 UPDATE_STAT64(tx_stat_gt255,
3461 tx_stat_etherstatspkts128octetsto255octets);
3462 UPDATE_STAT64(tx_stat_gt511,
3463 tx_stat_etherstatspkts256octetsto511octets);
3464 UPDATE_STAT64(tx_stat_gt1023,
3465 tx_stat_etherstatspkts512octetsto1023octets);
3466 UPDATE_STAT64(tx_stat_gt1518,
3467 tx_stat_etherstatspkts1024octetsto1522octets);
3468 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472 UPDATE_STAT64(tx_stat_gterr,
3473 tx_stat_dot3statsinternalmactransmiterrors);
3474 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3475}
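
The UPDATE_STAT64() calls above fold narrow, wrapping BigMAC hardware counters into the 64-bit hi/lo pairs the driver keeps in host_port_stats. A self-contained sketch of the extend-on-wrap idea behind such macros (names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 64-bit counter kept as two 32-bit halves, mirroring
     * the _hi/_lo statistics layout the driver uses. */
    struct stat64 {
        uint32_t hi;
        uint32_t lo;
    };

    /* Add a 32-bit delta into a hi/lo pair, carrying into the high word
     * on overflow -- the effect of the driver's ADD_EXTEND_64(). */
    static void add_extend_64(struct stat64 *s, uint32_t delta)
    {
        uint32_t lo = s->lo + delta;

        if (lo < s->lo)         /* low word wrapped */
            s->hi++;
        s->lo = lo;
    }

    /* Extend a wrapping hardware counter: accumulate only the modular
     * difference since the last snapshot, so hardware wrap is absorbed. */
    static void update_extend(struct stat64 *sw, uint32_t hw_now,
                              uint32_t *hw_old)
    {
        add_extend_64(sw, hw_now - *hw_old);
        *hw_old = hw_now;
    }

    int main(void)
    {
        struct stat64 pkts = {0, 0};
        uint32_t old = 0xfffffff0u;

        update_extend(&pkts, 0x10, &old);       /* hardware counter wrapped */
        printf("hi=%u lo=%u\n", pkts.hi, pkts.lo);      /* hi=0 lo=32 */
        return 0;
    }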
3476
3477static void bnx2x_emac_stats_update(struct bnx2x *bp)
3478{
3479 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3481
3482 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3513}
3514
3515static int bnx2x_hw_stats_update(struct bnx2x *bp)
3516{
3517 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518 struct nig_stats *old = &(bp->port.old_nig_stats);
3519 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521 struct regpair diff;
3522
3523 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524 bnx2x_bmac_stats_update(bp);
3525
3526 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527 bnx2x_emac_stats_update(bp);
3528
3529 else { /* unreached */
3530 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3531 return -1;
3532 }
a2fbb9ea 3533
bb2a0f7a
YG
3534 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535 new->brb_discard - old->brb_discard);
66e855f3
YG
3536 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3538
bb2a0f7a
YG
3539 UPDATE_STAT64_NIG(egress_mac_pkt0,
3540 etherstatspkts1024octetsto1522octets);
3541 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3542
bb2a0f7a 3543 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3544
bb2a0f7a
YG
3545 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546 sizeof(struct mac_stx));
3547 estats->brb_drop_hi = pstats->brb_drop_hi;
3548 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3549
bb2a0f7a 3550 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3551
bb2a0f7a 3552 return 0;
a2fbb9ea
ET
3553}
3554
bb2a0f7a 3555static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3556{
3557 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3558 int cl_id = BP_CL_ID(bp);
3559 struct tstorm_per_port_stats *tport =
3560 &stats->tstorm_common.port_statistics;
a2fbb9ea 3561 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3562 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3563 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3564 struct xstorm_per_client_stats *xclient =
3565 &stats->xstorm_common.client_statistics[cl_id];
3566 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3569 u32 diff;
3570
bb2a0f7a
YG
3571 /* are storm stats valid? */
3572 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573 bp->stats_counter) {
3574 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575 " tstorm counter (%d) != stats_counter (%d)\n",
3576 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3577 return -1;
3578 }
bb2a0f7a
YG
3579 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580 bp->stats_counter) {
3581 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582 " xstorm counter (%d) != stats_counter (%d)\n",
3583 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3584 return -2;
3585 }
a2fbb9ea 3586
bb2a0f7a
YG
3587 fstats->total_bytes_received_hi =
3588 fstats->valid_bytes_received_hi =
a2fbb9ea 3589 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3590 fstats->total_bytes_received_lo =
3591 fstats->valid_bytes_received_lo =
a2fbb9ea 3592 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3593
3594 estats->error_bytes_received_hi =
3595 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596 estats->error_bytes_received_lo =
3597 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598 ADD_64(estats->error_bytes_received_hi,
3599 estats->rx_stat_ifhcinbadoctets_hi,
3600 estats->error_bytes_received_lo,
3601 estats->rx_stat_ifhcinbadoctets_lo);
3602
3603 ADD_64(fstats->total_bytes_received_hi,
3604 estats->error_bytes_received_hi,
3605 fstats->total_bytes_received_lo,
3606 estats->error_bytes_received_lo);
3607
3608 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3609 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3610 total_multicast_packets_received);
a2fbb9ea 3611 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3612 total_broadcast_packets_received);
3613
3614 fstats->total_bytes_transmitted_hi =
3615 le32_to_cpu(xclient->total_sent_bytes.hi);
3616 fstats->total_bytes_transmitted_lo =
3617 le32_to_cpu(xclient->total_sent_bytes.lo);
3618
3619 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620 total_unicast_packets_transmitted);
3621 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622 total_multicast_packets_transmitted);
3623 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624 total_broadcast_packets_transmitted);
3625
3626 memcpy(estats, &(fstats->total_bytes_received_hi),
3627 sizeof(struct host_func_stats) - 2*sizeof(u32));
3628
3629 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631 estats->brb_truncate_discard =
3632 le32_to_cpu(tport->brb_truncate_discard);
3633 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3634
3635 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3636 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3637 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3638 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3639 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3640 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3641 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3642 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3643 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3644 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3645 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3646 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3647 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3648
bb2a0f7a
YG
3649 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650 old_tclient->packets_too_big_discard =
a2fbb9ea 3651 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3652 estats->no_buff_discard =
3653 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3655
3656 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657 old_xclient->unicast_bytes_sent.hi =
3658 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659 old_xclient->unicast_bytes_sent.lo =
3660 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661 old_xclient->multicast_bytes_sent.hi =
3662 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663 old_xclient->multicast_bytes_sent.lo =
3664 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665 old_xclient->broadcast_bytes_sent.hi =
3666 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667 old_xclient->broadcast_bytes_sent.lo =
3668 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3669
3670 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3671
3672 return 0;
3673}
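
The two early returns at the top of bnx2x_storm_stats_update() implement a sequence handshake with the firmware: the driver increments bp->stats_counter for every statistics query it posts, each storm stamps the snapshot it completes with the sequence it answered, and a snapshot counts as fresh only when stamp + 1 equals the driver's counter. A sketch of that check, assuming a wrapping 16-bit sequence as the (u16) casts suggest:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 if a snapshot stamped with fw_seq answers the request just
     * before drv_seq; 16-bit arithmetic makes the wrap-around harmless. */
    static int snapshot_is_current(uint16_t fw_seq, uint16_t drv_seq)
    {
        return (uint16_t)(fw_seq + 1) == drv_seq;
    }

    int main(void)
    {
        /* wrap case: firmware answered request 0xffff, driver is at 0 */
        printf("%d\n", snapshot_is_current(0xffff, 0));  /* 1: fresh */
        printf("%d\n", snapshot_is_current(5, 5));       /* 0: stale */
        return 0;
    }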
3674
bb2a0f7a 3675static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3676{
bb2a0f7a
YG
3677 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3679 struct net_device_stats *nstats = &bp->dev->stats;
3680
3681 nstats->rx_packets =
3682 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3685
3686 nstats->tx_packets =
3687 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3690
bb2a0f7a 3691 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3692
0e39e645 3693 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3694
bb2a0f7a
YG
3695 nstats->rx_dropped = old_tclient->checksum_discard +
3696 estats->mac_discard;
a2fbb9ea
ET
3697 nstats->tx_dropped = 0;
3698
3699 nstats->multicast =
3700 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3701
bb2a0f7a
YG
3702 nstats->collisions =
3703 estats->tx_stat_dot3statssinglecollisionframes_lo +
3704 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705 estats->tx_stat_dot3statslatecollisions_lo +
3706 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3707
bb2a0f7a
YG
3708 estats->jabber_packets_received =
3709 old_tclient->packets_too_big_discard +
3710 estats->rx_stat_dot3statsframestoolong_lo;
3711
3712 nstats->rx_length_errors =
3713 estats->rx_stat_etherstatsundersizepkts_lo +
3714 estats->jabber_packets_received;
66e855f3 3715 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3716 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3719 nstats->rx_missed_errors = estats->xxoverflow_discard;
3720
3721 nstats->rx_errors = nstats->rx_length_errors +
3722 nstats->rx_over_errors +
3723 nstats->rx_crc_errors +
3724 nstats->rx_frame_errors +
0e39e645
ET
3725 nstats->rx_fifo_errors +
3726 nstats->rx_missed_errors;
a2fbb9ea 3727
bb2a0f7a
YG
3728 nstats->tx_aborted_errors =
3729 estats->tx_stat_dot3statslatecollisions_lo +
3730 estats->tx_stat_dot3statsexcessivecollisions_lo;
3731 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3732 nstats->tx_fifo_errors = 0;
3733 nstats->tx_heartbeat_errors = 0;
3734 nstats->tx_window_errors = 0;
3735
3736 nstats->tx_errors = nstats->tx_aborted_errors +
3737 nstats->tx_carrier_errors;
a2fbb9ea
ET
3738}
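
bnx2x_net_stats_update() collapses the hi/lo statistic pairs into the kernel's unsigned long net_device_stats fields through bnx2x_hilo(), so on a 32-bit host only the low word survives. A sketch of that combine, under the assumption (implied by the callers, which all pass the _hi member) that each pair is laid out hi-first:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine a {hi, lo} pair laid out hi-first into an unsigned long.
     * On an LP64 host this yields the full 64-bit value; on a 32-bit
     * host the result truncates to the low word. */
    static unsigned long hilo(const uint32_t *hi)
    {
        unsigned long ret = hi[1];      /* the low word follows the high word */

        if (sizeof(ret) >= 8)           /* 64-bit host: splice in the high word */
            ret |= (unsigned long)(((uint64_t)hi[0]) << 32);
        return ret;
    }

    int main(void)
    {
        uint32_t pair[2] = { 0x1, 0x80000000u };        /* hi, lo */

        printf("%lu\n", hilo(pair));    /* 6442450944 on an LP64 host */
        return 0;
    }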
3739
bb2a0f7a 3740static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3741{
bb2a0f7a
YG
3742 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743 int update = 0;
a2fbb9ea 3744
bb2a0f7a
YG
3745 if (*stats_comp != DMAE_COMP_VAL)
3746 return;
3747
3748 if (bp->port.pmf)
3749 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3750
bb2a0f7a 3751 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3752
bb2a0f7a
YG
3753 if (update)
3754 bnx2x_net_stats_update(bp);
a2fbb9ea 3755
bb2a0f7a
YG
3756 else {
3757 if (bp->stats_pending) {
3758 bp->stats_pending++;
3759 if (bp->stats_pending == 3) {
3760 BNX2X_ERR("stats not updated for 3 consecutive checks\n");
3761 bnx2x_panic();
3762 return;
3763 }
3764 }
a2fbb9ea
ET
3765 }
3766
3767 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3768 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3770 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3771 int i;
a2fbb9ea
ET
3772
3773 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3775 " tx pkt (%lx)\n",
3776 bnx2x_tx_avail(bp->fp),
7a9b2557 3777 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3778 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3779 " rx pkt (%lx)\n",
7a9b2557
VZ
3780 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781 bp->fp->rx_comp_cons),
3782 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea
ET
3783 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3784 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3785 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3786 printk(KERN_DEBUG "tstats: checksum_discard %u "
3787 "packets_too_big_discard %u no_buff_discard %u "
3788 "mac_discard %u mac_filter_discard %u "
3789 "xxovrflow_discard %u brb_truncate_discard %u "
3790 "ttl0_discard %u\n",
bb2a0f7a
YG
3791 old_tclient->checksum_discard,
3792 old_tclient->packets_too_big_discard,
3793 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3794 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3795 estats->brb_truncate_discard,
3796 old_tclient->ttl0_discard);
a2fbb9ea
ET
3797
3798 for_each_queue(bp, i) {
3799 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800 bnx2x_fp(bp, i, tx_pkt),
3801 bnx2x_fp(bp, i, rx_pkt),
3802 bnx2x_fp(bp, i, rx_calls));
3803 }
3804 }
3805
bb2a0f7a
YG
3806 bnx2x_hw_stats_post(bp);
3807 bnx2x_storm_stats_post(bp);
3808}
a2fbb9ea 3809
bb2a0f7a
YG
3810static void bnx2x_port_stats_stop(struct bnx2x *bp)
3811{
3812 struct dmae_command *dmae;
3813 u32 opcode;
3814 int loader_idx = PMF_DMAE_C(bp);
3815 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3816
bb2a0f7a 3817 bp->executer_idx = 0;
a2fbb9ea 3818
bb2a0f7a
YG
3819 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3820 DMAE_CMD_C_ENABLE |
3821 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3822#ifdef __BIG_ENDIAN
bb2a0f7a 3823 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3824#else
bb2a0f7a 3825 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3826#endif
bb2a0f7a
YG
3827 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829
3830 if (bp->port.port_stx) {
3831
3832 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3833 if (bp->func_stx)
3834 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3835 else
3836 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3840 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3841 dmae->len = sizeof(struct host_port_stats) >> 2;
3842 if (bp->func_stx) {
3843 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844 dmae->comp_addr_hi = 0;
3845 dmae->comp_val = 1;
3846 } else {
3847 dmae->comp_addr_lo =
3848 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849 dmae->comp_addr_hi =
3850 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3852
bb2a0f7a
YG
3853 *stats_comp = 0;
3854 }
a2fbb9ea
ET
3855 }
3856
bb2a0f7a
YG
3857 if (bp->func_stx) {
3858
3859 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863 dmae->dst_addr_lo = bp->func_stx >> 2;
3864 dmae->dst_addr_hi = 0;
3865 dmae->len = sizeof(struct host_func_stats) >> 2;
3866 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868 dmae->comp_val = DMAE_COMP_VAL;
3869
3870 *stats_comp = 0;
a2fbb9ea 3871 }
bb2a0f7a
YG
3872}
3873
3874static void bnx2x_stats_stop(struct bnx2x *bp)
3875{
3876 int update = 0;
3877
3878 bnx2x_stats_comp(bp);
3879
3880 if (bp->port.pmf)
3881 update = (bnx2x_hw_stats_update(bp) == 0);
3882
3883 update |= (bnx2x_storm_stats_update(bp) == 0);
3884
3885 if (update) {
3886 bnx2x_net_stats_update(bp);
a2fbb9ea 3887
bb2a0f7a
YG
3888 if (bp->port.pmf)
3889 bnx2x_port_stats_stop(bp);
3890
3891 bnx2x_hw_stats_post(bp);
3892 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3893 }
3894}
3895
bb2a0f7a
YG
3896static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3897{
3898}
3899
3900static const struct {
3901 void (*action)(struct bnx2x *bp);
3902 enum bnx2x_stats_state next_state;
3903} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3904/* state event */
3905{
3906/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3908/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3910},
3911{
3912/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3913/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3914/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3915/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3916}
3917};
3918
3919static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3920{
3921 enum bnx2x_stats_state state = bp->stats_state;
3922
3923 bnx2x_stats_stm[state][event].action(bp);
3924 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3925
3926 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928 state, event, bp->stats_state);
3929}
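
The statistics code is driven by the table above: state x event indexes an {action, next_state} pair, so bnx2x_stats_handle() never needs a nested switch, and new states or events only grow the table. The same table-driven dispatch pattern in miniature, with toy states and events:

    #include <stdio.h>

    enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
    enum event { EV_LINK_UP, EV_STOP, EV_MAX };

    struct ctx { enum state state; };

    static void act_start(struct ctx *c) { (void)c; puts("start collecting"); }
    static void act_stop(struct ctx *c)  { (void)c; puts("stop collecting"); }
    static void act_nop(struct ctx *c)   { (void)c; }

    /* state x event -> {action, next state}, mirroring bnx2x_stats_stm */
    static const struct {
        void (*action)(struct ctx *);
        enum state next;
    } stm[ST_MAX][EV_MAX] = {
        [ST_DISABLED] = {
            [EV_LINK_UP] = { act_start, ST_ENABLED },
            [EV_STOP]    = { act_nop,   ST_DISABLED },
        },
        [ST_ENABLED] = {
            [EV_LINK_UP] = { act_nop,   ST_ENABLED },
            [EV_STOP]    = { act_stop,  ST_DISABLED },
        },
    };

    static void handle(struct ctx *c, enum event ev)
    {
        enum state s = c->state;

        stm[s][ev].action(c);
        c->state = stm[s][ev].next;
    }

    int main(void)
    {
        struct ctx c = { ST_DISABLED };

        handle(&c, EV_LINK_UP);         /* start collecting */
        handle(&c, EV_STOP);            /* stop collecting */
        return 0;
    }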
3930
a2fbb9ea
ET
3931static void bnx2x_timer(unsigned long data)
3932{
3933 struct bnx2x *bp = (struct bnx2x *) data;
3934
3935 if (!netif_running(bp->dev))
3936 return;
3937
3938 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3939 goto timer_restart;
a2fbb9ea
ET
3940
3941 if (poll) {
3942 struct bnx2x_fastpath *fp = &bp->fp[0];
3943 int rc;
3944
3945 bnx2x_tx_int(fp, 1000);
3946 rc = bnx2x_rx_int(fp, 1000);
3947 }
3948
34f80b04
EG
3949 if (!BP_NOMCP(bp)) {
3950 int func = BP_FUNC(bp);
a2fbb9ea
ET
3951 u32 drv_pulse;
3952 u32 mcp_pulse;
3953
3954 ++bp->fw_drv_pulse_wr_seq;
3955 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956 /* TBD - add SYSTEM_TIME */
3957 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3958 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3959
34f80b04 3960 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3961 MCP_PULSE_SEQ_MASK);
3962 /* The delta between driver pulse and mcp response
3963 * should be 1 (before mcp response) or 0 (after mcp response)
3964 */
3965 if ((drv_pulse != mcp_pulse) &&
3966 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967 /* someone lost a heartbeat... */
3968 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969 drv_pulse, mcp_pulse);
3970 }
3971 }
3972
bb2a0f7a
YG
3973 if ((bp->state == BNX2X_STATE_OPEN) ||
3974 (bp->state == BNX2X_STATE_DISABLED))
3975 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3976
f1410647 3977timer_restart:
a2fbb9ea
ET
3978 mod_timer(&bp->timer, jiffies + bp->current_interval);
3979}
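
The pulse exchange in bnx2x_timer() is a masked sequence heartbeat: the driver publishes drv_pulse, the management firmware echoes mcp_pulse, and a delta of 0 or 1 is tolerated because the MCP may not have answered the newest pulse yet. A standalone sketch of the tolerance test; PULSE_SEQ_MASK below is a hypothetical stand-in for MCP_PULSE_SEQ_MASK:

    #include <stdint.h>
    #include <stdio.h>

    #define PULSE_SEQ_MASK 0x7fff       /* hypothetical sequence mask */

    /* A heartbeat is healthy if the peer echoed either this pulse (delta 0)
     * or the previous one (delta 1), modulo the sequence mask. */
    static int pulse_ok(uint32_t drv_pulse, uint32_t mcp_pulse)
    {
        return (drv_pulse == mcp_pulse) ||
               (drv_pulse == ((mcp_pulse + 1) & PULSE_SEQ_MASK));
    }

    int main(void)
    {
        printf("%d\n", pulse_ok(0, PULSE_SEQ_MASK));    /* 1: wrap, delta 1 */
        printf("%d\n", pulse_ok(5, 3));                 /* 0: lost beats */
        return 0;
    }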
3980
3981/* end of Statistics */
3982
3983/* nic init */
3984
3985/*
3986 * nic init service functions
3987 */
3988
34f80b04 3989static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3990{
34f80b04
EG
3991 int port = BP_PORT(bp);
3992
3993 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3995 sizeof(struct ustorm_def_status_block)/4);
3996 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3998 sizeof(struct cstorm_def_status_block)/4);
3999}
4000
4001static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4002 struct host_status_block *sb, dma_addr_t mapping)
4003{
4004 int port = BP_PORT(bp);
bb2a0f7a 4005 int func = BP_FUNC(bp);
a2fbb9ea 4006 int index;
34f80b04 4007 u64 section;
a2fbb9ea
ET
4008
4009 /* USTORM */
4010 section = ((u64)mapping) + offsetof(struct host_status_block,
4011 u_status_block);
34f80b04 4012 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4013
4014 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4015 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4016 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4017 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4018 U64_HI(section));
bb2a0f7a
YG
4019 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4021
4022 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4024 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4025
4026 /* CSTORM */
4027 section = ((u64)mapping) + offsetof(struct host_status_block,
4028 c_status_block);
34f80b04 4029 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4030
4031 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4032 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4033 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4034 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4035 U64_HI(section));
7a9b2557
VZ
4036 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4038
4039 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4041 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042
4043 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4044}
4045
4046static void bnx2x_zero_def_sb(struct bnx2x *bp)
4047{
4048 int func = BP_FUNC(bp);
a2fbb9ea 4049
34f80b04
EG
4050 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052 sizeof(struct ustorm_def_status_block)/4);
4053 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055 sizeof(struct cstorm_def_status_block)/4);
4056 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058 sizeof(struct xstorm_def_status_block)/4);
4059 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4062}
4063
4064static void bnx2x_init_def_sb(struct bnx2x *bp,
4065 struct host_def_status_block *def_sb,
34f80b04 4066 dma_addr_t mapping, int sb_id)
a2fbb9ea 4067{
34f80b04
EG
4068 int port = BP_PORT(bp);
4069 int func = BP_FUNC(bp);
a2fbb9ea
ET
4070 int index, val, reg_offset;
4071 u64 section;
4072
4073 /* ATTN */
4074 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075 atten_status_block);
34f80b04 4076 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4077
49d66772
ET
4078 bp->def_att_idx = 0;
4079 bp->attn_state = 0;
4080
a2fbb9ea
ET
4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4083
34f80b04 4084 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4085 bp->attn_group[index].sig[0] = REG_RD(bp,
4086 reg_offset + 0x10*index);
4087 bp->attn_group[index].sig[1] = REG_RD(bp,
4088 reg_offset + 0x4 + 0x10*index);
4089 bp->attn_group[index].sig[2] = REG_RD(bp,
4090 reg_offset + 0x8 + 0x10*index);
4091 bp->attn_group[index].sig[3] = REG_RD(bp,
4092 reg_offset + 0xc + 0x10*index);
4093 }
4094
a2fbb9ea
ET
4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096 HC_REG_ATTN_MSG0_ADDR_L);
4097
4098 REG_WR(bp, reg_offset, U64_LO(section));
4099 REG_WR(bp, reg_offset + 4, U64_HI(section));
4100
4101 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4102
4103 val = REG_RD(bp, reg_offset);
34f80b04 4104 val |= sb_id;
a2fbb9ea
ET
4105 REG_WR(bp, reg_offset, val);
4106
4107 /* USTORM */
4108 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109 u_def_status_block);
34f80b04 4110 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4111
49d66772
ET
4112 bp->def_u_idx = 0;
4113
a2fbb9ea 4114 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4115 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4116 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4117 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4118 U64_HI(section));
34f80b04
EG
4119 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4120 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4121 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4122 BNX2X_BTR);
4123
4124 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4125 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4126 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4127
4128 /* CSTORM */
4129 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4130 c_def_status_block);
34f80b04 4131 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea 4132
49d66772
ET
4133 bp->def_c_idx = 0;
4134
a2fbb9ea 4135 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4136 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4137 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4138 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4139 U64_HI(section));
34f80b04
EG
4140 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4141 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4142 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4143 BNX2X_BTR);
4144
4145 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4146 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4147 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4148
4149 /* TSTORM */
4150 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4151 t_def_status_block);
34f80b04 4152 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea 4153
49d66772
ET
4154 bp->def_t_idx = 0;
4155
a2fbb9ea 4156 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4157 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4158 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4159 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4160 U64_HI(section));
34f80b04
EG
4161 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4162 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4164 BNX2X_BTR);
4165
4166 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4167 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4168 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4169
4170 /* XSTORM */
4171 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4172 x_def_status_block);
34f80b04 4173 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea 4174
49d66772
ET
4175 bp->def_x_idx = 0;
4176
a2fbb9ea 4177 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4178 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4179 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4180 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4181 U64_HI(section));
34f80b04
EG
4182 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4183 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4184 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
4185 BNX2X_BTR);
4186
4187 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4188 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4189 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4190
bb2a0f7a 4191 bp->stats_pending = 0;
66e855f3 4192 bp->set_mac_pending = 0;
bb2a0f7a 4193
34f80b04 4194 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4195}
4196
4197static void bnx2x_update_coalesce(struct bnx2x *bp)
4198{
34f80b04 4199 int port = BP_PORT(bp);
a2fbb9ea
ET
4200 int i;
4201
4202 for_each_queue(bp, i) {
34f80b04 4203 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4204
4205 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4206 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4207 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4208 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4209 bp->rx_ticks/12);
a2fbb9ea 4210 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4211 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4212 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4213 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4214
4215 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4216 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4217 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4218 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4219 bp->tx_ticks/12);
a2fbb9ea 4220 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4221 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4222 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4223 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4224 }
4225}
4226
7a9b2557
VZ
4227static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4228 struct bnx2x_fastpath *fp, int last)
4229{
4230 int i;
4231
4232 for (i = 0; i < last; i++) {
4233 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4234 struct sk_buff *skb = rx_buf->skb;
4235
4236 if (skb == NULL) {
4237 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4238 continue;
4239 }
4240
4241 if (fp->tpa_state[i] == BNX2X_TPA_START)
4242 pci_unmap_single(bp->pdev,
4243 pci_unmap_addr(rx_buf, mapping),
4244 bp->rx_buf_use_size,
4245 PCI_DMA_FROMDEVICE);
4246
4247 dev_kfree_skb(skb);
4248 rx_buf->skb = NULL;
4249 }
4250}
4251
a2fbb9ea
ET
4252static void bnx2x_init_rx_rings(struct bnx2x *bp)
4253{
7a9b2557 4254 int func = BP_FUNC(bp);
32626230
EG
4255 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4256 ETH_MAX_AGGREGATION_QUEUES_E1H;
4257 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4258 int i, j;
a2fbb9ea
ET
4259
4260 bp->rx_buf_use_size = bp->dev->mtu;
a2fbb9ea
ET
4261 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4262 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4263
7a9b2557
VZ
4264 if (bp->flags & TPA_ENABLE_FLAG) {
4265 DP(NETIF_MSG_IFUP,
4266 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4267 bp->rx_buf_use_size, bp->rx_buf_size,
4268 bp->dev->mtu + ETH_OVREHEAD);
4269
4270 for_each_queue(bp, j) {
32626230 4271 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4272
32626230 4273 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4274 fp->tpa_pool[i].skb =
4275 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4276 if (!fp->tpa_pool[i].skb) {
4277 BNX2X_ERR("Failed to allocate TPA "
4278 "skb pool for queue[%d] - "
4279 "disabling TPA on this "
4280 "queue!\n", j);
4281 bnx2x_free_tpa_pool(bp, fp, i);
4282 fp->disable_tpa = 1;
4283 break;
4284 }
4285 pci_unmap_addr_set((struct sw_rx_bd *)
4286 &bp->fp->tpa_pool[i],
4287 mapping, 0);
4288 fp->tpa_state[i] = BNX2X_TPA_STOP;
4289 }
4290 }
4291 }
4292
a2fbb9ea
ET
4293 for_each_queue(bp, j) {
4294 struct bnx2x_fastpath *fp = &bp->fp[j];
4295
4296 fp->rx_bd_cons = 0;
4297 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4298 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4299
4300 /* "next page" elements initialization */
4301 /* SGE ring */
4302 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4303 struct eth_rx_sge *sge;
4304
4305 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4306 sge->addr_hi =
4307 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4308 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4309 sge->addr_lo =
4310 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4311 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4312 }
4313
4314 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4315
7a9b2557 4316 /* RX BD ring */
a2fbb9ea
ET
4317 for (i = 1; i <= NUM_RX_RINGS; i++) {
4318 struct eth_rx_bd *rx_bd;
4319
4320 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4321 rx_bd->addr_hi =
4322 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4323 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4324 rx_bd->addr_lo =
4325 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4326 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4327 }
4328
34f80b04 4329 /* CQ ring */
a2fbb9ea
ET
4330 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4331 struct eth_rx_cqe_next_page *nextpg;
4332
4333 nextpg = (struct eth_rx_cqe_next_page *)
4334 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4335 nextpg->addr_hi =
4336 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4337 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4338 nextpg->addr_lo =
4339 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4340 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4341 }
4342
7a9b2557
VZ
4343 /* Allocate SGEs and initialize the ring elements */
4344 for (i = 0, ring_prod = 0;
4345 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4346
7a9b2557
VZ
4347 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4348 BNX2X_ERR("was only able to allocate "
4349 "%d rx sges\n", i);
4350 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4351 /* Cleanup already allocated elements */
4352 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4353 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4354 fp->disable_tpa = 1;
4355 ring_prod = 0;
4356 break;
4357 }
4358 ring_prod = NEXT_SGE_IDX(ring_prod);
4359 }
4360 fp->rx_sge_prod = ring_prod;
4361
4362 /* Allocate BDs and initialize BD ring */
66e855f3 4363 fp->rx_comp_cons = 0;
7a9b2557 4364 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4365 for (i = 0; i < bp->rx_ring_size; i++) {
4366 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4367 BNX2X_ERR("was only able to allocate "
4368 "%d rx skbs\n", i);
66e855f3 4369 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4370 break;
4371 }
4372 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4373 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4374 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4375 }
4376
7a9b2557
VZ
4377 fp->rx_bd_prod = ring_prod;
4378 /* must not have more available CQEs than BDs */
4379 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4380 cqe_ring_prod);
a2fbb9ea
ET
4381 fp->rx_pkt = fp->rx_calls = 0;
4382
7a9b2557
VZ
4383 /* Warning!
4384 * this will generate an interrupt (to the TSTORM),
4385 * so it must only be done after the chip is initialized
4386 */
4387 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4388 fp->rx_sge_prod);
a2fbb9ea
ET
4389 if (j != 0)
4390 continue;
4391
4392 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4393 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4394 U64_LO(fp->rx_comp_mapping));
4395 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4396 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4397 U64_HI(fp->rx_comp_mapping));
4398 }
4399}
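
Both the BD and CQ loops above reserve the trailing entries of every page as "next page" pointers, chaining N physically separate pages into one logical ring the chip follows by DMA address; the producer index must therefore hop over those link slots, which is what NEXT_RX_IDX()/NEXT_RCQ_IDX()/NEXT_SGE_IDX() do. A sketch of that index advance with illustrative sizes:

    #include <stdio.h>

    #define DESC_PER_PAGE   512     /* descriptors that fit in one ring page */
    #define LINK_SLOTS      1       /* trailing slots holding the next-page pointer */
    #define USABLE          (DESC_PER_PAGE - LINK_SLOTS)
    #define NUM_PAGES       4
    #define RING_SIZE       (DESC_PER_PAGE * NUM_PAGES)

    /* Advance a ring index, hopping over the reserved link slot at the end
     * of each page -- the role NEXT_RX_IDX()/NEXT_RCQ_IDX() play. */
    static unsigned next_idx(unsigned idx)
    {
        idx++;
        if ((idx % DESC_PER_PAGE) == USABLE)    /* landed on the link slot */
            idx += LINK_SLOTS;
        return idx % RING_SIZE;
    }

    int main(void)
    {
        unsigned last_usable = USABLE - 1;      /* last data slot of page 0 */

        printf("%u -> %u\n", last_usable, next_idx(last_usable)); /* 510 -> 512 */
        return 0;
    }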
4400
4401static void bnx2x_init_tx_ring(struct bnx2x *bp)
4402{
4403 int i, j;
4404
4405 for_each_queue(bp, j) {
4406 struct bnx2x_fastpath *fp = &bp->fp[j];
4407
4408 for (i = 1; i <= NUM_TX_RINGS; i++) {
4409 struct eth_tx_bd *tx_bd =
4410 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4411
4412 tx_bd->addr_hi =
4413 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4414 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4415 tx_bd->addr_lo =
4416 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4417 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4418 }
4419
4420 fp->tx_pkt_prod = 0;
4421 fp->tx_pkt_cons = 0;
4422 fp->tx_bd_prod = 0;
4423 fp->tx_bd_cons = 0;
4424 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4425 fp->tx_pkt = 0;
4426 }
4427}
4428
4429static void bnx2x_init_sp_ring(struct bnx2x *bp)
4430{
34f80b04 4431 int func = BP_FUNC(bp);
a2fbb9ea
ET
4432
4433 spin_lock_init(&bp->spq_lock);
4434
4435 bp->spq_left = MAX_SPQ_PENDING;
4436 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4437 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4438 bp->spq_prod_bd = bp->spq;
4439 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4440
34f80b04 4441 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4442 U64_LO(bp->spq_mapping));
34f80b04
EG
4443 REG_WR(bp,
4444 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4445 U64_HI(bp->spq_mapping));
4446
34f80b04 4447 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4448 bp->spq_prod_idx);
4449}
4450
4451static void bnx2x_init_context(struct bnx2x *bp)
4452{
4453 int i;
4454
4455 for_each_queue(bp, i) {
4456 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4457 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4458 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4459
4460 context->xstorm_st_context.tx_bd_page_base_hi =
4461 U64_HI(fp->tx_desc_mapping);
4462 context->xstorm_st_context.tx_bd_page_base_lo =
4463 U64_LO(fp->tx_desc_mapping);
4464 context->xstorm_st_context.db_data_addr_hi =
4465 U64_HI(fp->tx_prods_mapping);
4466 context->xstorm_st_context.db_data_addr_lo =
4467 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4468 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4469 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4470
4471 context->ustorm_st_context.common.sb_index_numbers =
4472 BNX2X_RX_SB_INDEX_NUM;
4473 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4474 context->ustorm_st_context.common.status_block_id = sb_id;
4475 context->ustorm_st_context.common.flags =
4476 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4477 context->ustorm_st_context.common.mc_alignment_size = 64;
4478 context->ustorm_st_context.common.bd_buff_size =
4479 bp->rx_buf_use_size;
4480 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4481 U64_HI(fp->rx_desc_mapping);
34f80b04 4482 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4483 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4484 if (!fp->disable_tpa) {
4485 context->ustorm_st_context.common.flags |=
4486 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4487 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4488 context->ustorm_st_context.common.sge_buff_size =
4489 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4490 context->ustorm_st_context.common.sge_page_base_hi =
4491 U64_HI(fp->rx_sge_mapping);
4492 context->ustorm_st_context.common.sge_page_base_lo =
4493 U64_LO(fp->rx_sge_mapping);
4494 }
4495
a2fbb9ea
ET
4496 context->cstorm_st_context.sb_index_number =
4497 HC_INDEX_C_ETH_TX_CQ_CONS;
34f80b04 4498 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4499
4500 context->xstorm_ag_context.cdu_reserved =
4501 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4502 CDU_REGION_NUMBER_XCM_AG,
4503 ETH_CONNECTION_TYPE);
4504 context->ustorm_ag_context.cdu_usage =
4505 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4506 CDU_REGION_NUMBER_UCM_AG,
4507 ETH_CONNECTION_TYPE);
4508 }
4509}
4510
4511static void bnx2x_init_ind_table(struct bnx2x *bp)
4512{
34f80b04 4513 int port = BP_PORT(bp);
a2fbb9ea
ET
4514 int i;
4515
4516 if (!is_multi(bp))
4517 return;
4518
34f80b04 4519 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4520 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4521 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4522 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4523 i % bp->num_queues);
4524
4525 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4526}
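
The indirection table fill above is a plain round-robin: RSS hash bucket i is served by queue i % num_queues, so a well-distributed hash spreads flows evenly across the queues. In miniature (the table size below is illustrative, not TSTORM_INDIRECTION_TABLE_SIZE):

    #include <stdio.h>

    #define IND_TABLE_SIZE 128      /* illustrative; the chip defines its own */

    int main(void)
    {
        unsigned char table[IND_TABLE_SIZE];
        int num_queues = 4;
        int i;

        /* Round-robin fill: bucket i is served by queue i % num_queues. */
        for (i = 0; i < IND_TABLE_SIZE; i++)
            table[i] = i % num_queues;

        printf("bucket 0 -> queue %d, bucket 5 -> queue %d\n",
               table[0], table[5]);
        return 0;
    }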
4527
49d66772
ET
4528static void bnx2x_set_client_config(struct bnx2x *bp)
4529{
49d66772 4530 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4531 int port = BP_PORT(bp);
4532 int i;
49d66772 4533
34f80b04 4534 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4535 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4536 tstorm_client.config_flags =
4537 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4538#ifdef BCM_VLAN
34f80b04 4539 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4540 tstorm_client.config_flags |=
4541 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4542 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4543 }
4544#endif
49d66772 4545
7a9b2557
VZ
4546 if (bp->flags & TPA_ENABLE_FLAG) {
4547 tstorm_client.max_sges_for_packet =
4548 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4549 tstorm_client.max_sges_for_packet =
4550 ((tstorm_client.max_sges_for_packet +
4551 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4552 PAGES_PER_SGE_SHIFT;
4553
4554 tstorm_client.config_flags |=
4555 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4556 }
4557
49d66772
ET
4558 for_each_queue(bp, i) {
4559 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4560 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4561 ((u32 *)&tstorm_client)[0]);
4562 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4563 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4564 ((u32 *)&tstorm_client)[1]);
4565 }
4566
34f80b04
EG
4567 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4568 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4569}
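
The max_sges_for_packet arithmetic above is two successive round-ups: the MTU is rounded up to whole pages, and the page count is then rounded up to a multiple of PAGES_PER_SGE before being converted to an SGE count. A sketch with the usual power-of-two identities; the SGE_PAGES_SHIFT value below is an assumed example, not the driver's PAGES_PER_SGE_SHIFT:

    #include <stdio.h>

    #define PG_SHIFT        12
    #define PG_SIZE         (1 << PG_SHIFT)
    #define SGE_PAGES_SHIFT 1       /* assumed example value */
    #define SGE_PAGES       (1 << SGE_PAGES_SHIFT)

    /* Round x up to a multiple of the power-of-two a. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static int max_sges_for_packet(int mtu)
    {
        int pages = ALIGN_UP(mtu, PG_SIZE) >> PG_SHIFT;

        return ALIGN_UP(pages, SGE_PAGES) >> SGE_PAGES_SHIFT;
    }

    int main(void)
    {
        /* 9000-byte jumbo frame: 3 pages -> rounded to 4 -> 2 SGEs */
        printf("%d\n", max_sges_for_packet(9000));
        return 0;
    }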
4570
a2fbb9ea
ET
4571static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4572{
a2fbb9ea 4573 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4574 int mode = bp->rx_mode;
4575 int mask = (1 << BP_L_ID(bp));
4576 int func = BP_FUNC(bp);
a2fbb9ea
ET
4577 int i;
4578
4579 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4580
4581 switch (mode) {
4582 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4583 tstorm_mac_filter.ucast_drop_all = mask;
4584 tstorm_mac_filter.mcast_drop_all = mask;
4585 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4586 break;
4587 case BNX2X_RX_MODE_NORMAL:
34f80b04 4588 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4589 break;
4590 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4591 tstorm_mac_filter.mcast_accept_all = mask;
4592 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4593 break;
4594 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4595 tstorm_mac_filter.ucast_accept_all = mask;
4596 tstorm_mac_filter.mcast_accept_all = mask;
4597 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4598 break;
4599 default:
34f80b04
EG
4600 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4601 break;
a2fbb9ea
ET
4602 }
4603
4604 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4605 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4606 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4607 ((u32 *)&tstorm_mac_filter)[i]);
4608
34f80b04 4609/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4610 ((u32 *)&tstorm_mac_filter)[i]); */
4611 }
a2fbb9ea 4612
49d66772
ET
4613 if (mode != BNX2X_RX_MODE_NONE)
4614 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4615}
4616
471de716
EG
4617static void bnx2x_init_internal_common(struct bnx2x *bp)
4618{
4619 int i;
4620
4621 /* Zero this manually as its initialization is
4622 currently missing in the initTool */
4623 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4624 REG_WR(bp, BAR_USTRORM_INTMEM +
4625 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4626}
4627
4628static void bnx2x_init_internal_port(struct bnx2x *bp)
4629{
4630 int port = BP_PORT(bp);
4631
4632 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4635 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4636}
4637
4638static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4639{
a2fbb9ea
ET
4640 struct tstorm_eth_function_common_config tstorm_config = {0};
4641 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4642 int port = BP_PORT(bp);
4643 int func = BP_FUNC(bp);
4644 int i;
471de716 4645 u16 max_agg_size;
a2fbb9ea
ET
4646
4647 if (is_multi(bp)) {
4648 tstorm_config.config_flags = MULTI_FLAGS;
4649 tstorm_config.rss_result_mask = MULTI_MASK;
4650 }
4651
34f80b04
EG
4652 tstorm_config.leading_client_id = BP_L_ID(bp);
4653
a2fbb9ea 4654 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4655 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4656 (*(u32 *)&tstorm_config));
4657
c14423fe 4658 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4659 bnx2x_set_storm_rx_mode(bp);
4660
66e855f3
YG
4661 /* reset xstorm per client statistics */
4662 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4663 REG_WR(bp, BAR_XSTRORM_INTMEM +
4664 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4665 i*4, 0);
4666 }
4667 /* reset tstorm per client statistics */
4668 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4669 REG_WR(bp, BAR_TSTRORM_INTMEM +
4670 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4671 i*4, 0);
4672 }
4673
4674 /* Init statistics related context */
34f80b04 4675 stats_flags.collect_eth = 1;
a2fbb9ea 4676
66e855f3 4677 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4678 ((u32 *)&stats_flags)[0]);
66e855f3 4679 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4680 ((u32 *)&stats_flags)[1]);
4681
66e855f3 4682 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4683 ((u32 *)&stats_flags)[0]);
66e855f3 4684 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4685 ((u32 *)&stats_flags)[1]);
4686
66e855f3 4687 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4688 ((u32 *)&stats_flags)[0]);
66e855f3 4689 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4690 ((u32 *)&stats_flags)[1]);
4691
66e855f3
YG
4692 REG_WR(bp, BAR_XSTRORM_INTMEM +
4693 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4694 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4695 REG_WR(bp, BAR_XSTRORM_INTMEM +
4696 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4697 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4698
4699 REG_WR(bp, BAR_TSTRORM_INTMEM +
4700 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4701 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4702 REG_WR(bp, BAR_TSTRORM_INTMEM +
4703 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4704 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4705
4706 if (CHIP_IS_E1H(bp)) {
4707 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4708 IS_E1HMF(bp));
4709 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4710 IS_E1HMF(bp));
4711 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4712 IS_E1HMF(bp));
4713 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4714 IS_E1HMF(bp));
4715
7a9b2557
VZ
4716 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4717 bp->e1hov);
34f80b04
EG
4718 }
4719
471de716
EG
4720 /* Init CQ ring mapping and aggregation size */
4721 max_agg_size = min((u32)(bp->rx_buf_use_size +
4722 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4723 (u32)0xffff);
7a9b2557
VZ
4724 for_each_queue(bp, i) {
4725 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4726
4727 REG_WR(bp, BAR_USTRORM_INTMEM +
4728 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4729 U64_LO(fp->rx_comp_mapping));
4730 REG_WR(bp, BAR_USTRORM_INTMEM +
4731 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4732 U64_HI(fp->rx_comp_mapping));
4733
7a9b2557
VZ
4734 REG_WR16(bp, BAR_USTRORM_INTMEM +
4735 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4736 max_agg_size);
4737 }
a2fbb9ea
ET
4738}
4739
471de716
EG
4740static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4741{
4742 switch (load_code) {
4743 case FW_MSG_CODE_DRV_LOAD_COMMON:
4744 bnx2x_init_internal_common(bp);
4745 /* no break */
4746
4747 case FW_MSG_CODE_DRV_LOAD_PORT:
4748 bnx2x_init_internal_port(bp);
4749 /* no break */
4750
4751 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4752 bnx2x_init_internal_func(bp);
4753 break;
4754
4755 default:
4756 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4757 break;
4758 }
4759}
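
bnx2x_init_internal() relies on deliberate case fall-through (flagged by the /* no break */ comments): a COMMON load also runs the PORT and FUNCTION stages, a PORT load runs FUNCTION, and a FUNCTION load runs only its own. The same scope-nesting trick in miniature:

    #include <stdio.h>

    enum scope { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

    static void init_by_scope(enum scope s)
    {
        switch (s) {
        case LOAD_COMMON:
            puts("common init");
            /* fall through: common implies port + function */
        case LOAD_PORT:
            puts("port init");
            /* fall through: port implies function */
        case LOAD_FUNCTION:
            puts("function init");
            break;
        }
    }

    int main(void)
    {
        init_by_scope(LOAD_PORT);       /* port init, function init */
        return 0;
    }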
4760
4761static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4762{
4763 int i;
4764
4765 for_each_queue(bp, i) {
4766 struct bnx2x_fastpath *fp = &bp->fp[i];
4767
34f80b04 4768 fp->bp = bp;
a2fbb9ea 4769 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4770 fp->index = i;
34f80b04
EG
4771 fp->cl_id = BP_L_ID(bp) + i;
4772 fp->sb_id = fp->cl_id;
4773 DP(NETIF_MSG_IFUP,
4774 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4775 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4776 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4777 fp->status_blk_mapping);
a2fbb9ea
ET
4778 }
4779
4780 bnx2x_init_def_sb(bp, bp->def_status_blk,
34f80b04 4781 bp->def_status_blk_mapping, DEF_SB_ID);
a2fbb9ea
ET
4782 bnx2x_update_coalesce(bp);
4783 bnx2x_init_rx_rings(bp);
4784 bnx2x_init_tx_ring(bp);
4785 bnx2x_init_sp_ring(bp);
4786 bnx2x_init_context(bp);
471de716 4787 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4788 bnx2x_init_ind_table(bp);
615f8fd9 4789 bnx2x_int_enable(bp);
a2fbb9ea
ET
4790}
4791
4792/* end of nic init */
4793
4794/*
4795 * gzip service functions
4796 */
4797
4798static int bnx2x_gunzip_init(struct bnx2x *bp)
4799{
4800 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4801 &bp->gunzip_mapping);
4802 if (bp->gunzip_buf == NULL)
4803 goto gunzip_nomem1;
4804
4805 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4806 if (bp->strm == NULL)
4807 goto gunzip_nomem2;
4808
4809 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4810 GFP_KERNEL);
4811 if (bp->strm->workspace == NULL)
4812 goto gunzip_nomem3;
4813
4814 return 0;
4815
4816gunzip_nomem3:
4817 kfree(bp->strm);
4818 bp->strm = NULL;
4819
4820gunzip_nomem2:
4821 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822 bp->gunzip_mapping);
4823 bp->gunzip_buf = NULL;
4824
4825gunzip_nomem1:
4826 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4827 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4828 return -ENOMEM;
4829}
4830
4831static void bnx2x_gunzip_end(struct bnx2x *bp)
4832{
4833 kfree(bp->strm->workspace);
4834
4835 kfree(bp->strm);
4836 bp->strm = NULL;
4837
4838 if (bp->gunzip_buf) {
4839 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4840 bp->gunzip_mapping);
4841 bp->gunzip_buf = NULL;
4842 }
4843}
4844
4845static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4846{
4847 int n, rc;
4848
4849 /* check gzip header */
4850 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4851 return -EINVAL;
4852
4853 n = 10;
4854
34f80b04 4855#define FNAME 0x8
a2fbb9ea
ET
4856
4857 if (zbuf[3] & FNAME)
4858 while ((n < len) && (zbuf[n++] != 0));
4859
4860 bp->strm->next_in = zbuf + n;
4861 bp->strm->avail_in = len - n;
4862 bp->strm->next_out = bp->gunzip_buf;
4863 bp->strm->avail_out = FW_BUF_SIZE;
4864
4865 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4866 if (rc != Z_OK)
4867 return rc;
4868
4869 rc = zlib_inflate(bp->strm, Z_FINISH);
4870 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4871 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4872 bp->dev->name, bp->strm->msg);
4873
4874 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4875 if (bp->gunzip_outlen & 0x3)
4876 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4877 " gunzip_outlen (%d) not aligned\n",
4878 bp->dev->name, bp->gunzip_outlen);
4879 bp->gunzip_outlen >>= 2;
4880
4881 zlib_inflateEnd(bp->strm);
4882
4883 if (rc == Z_STREAM_END)
4884 return 0;
4885
4886 return rc;
4887}
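
bnx2x_gunzip() skips the gzip header by hand (magic bytes, method, optional file name) and then hands zlib a raw deflate stream, which is what the negative window-bits argument to zlib_inflateInit2() selects. The same trick with the userspace zlib API, as a sketch with error handling trimmed (link with -lz):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /* Inflate a raw deflate stream (no gzip/zlib wrapper), as signalled by
     * the negative windowBits -- the userspace twin of
     * zlib_inflateInit2(bp->strm, -MAX_WBITS). Returns bytes produced. */
    static int raw_inflate(const unsigned char *in, size_t in_len,
                           unsigned char *out, size_t out_len)
    {
        z_stream strm;
        int rc;

        memset(&strm, 0, sizeof(strm));
        if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
            return -1;

        strm.next_in = (unsigned char *)in;
        strm.avail_in = (uInt)in_len;
        strm.next_out = out;
        strm.avail_out = (uInt)out_len;

        rc = inflate(&strm, Z_FINISH);
        out_len -= strm.avail_out;      /* bytes actually produced */
        inflateEnd(&strm);
        return (rc == Z_STREAM_END) ? (int)out_len : -1;
    }

    int main(void)
    {
        const unsigned char msg[] = "firmware";
        unsigned char raw[64], back[64];
        z_stream d;
        size_t clen;
        int n;

        /* produce a raw deflate stream to feed the helper */
        memset(&d, 0, sizeof(d));
        deflateInit2(&d, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -MAX_WBITS,
                     8, Z_DEFAULT_STRATEGY);
        d.next_in = (unsigned char *)msg;
        d.avail_in = sizeof(msg) - 1;
        d.next_out = raw;
        d.avail_out = sizeof(raw);
        deflate(&d, Z_FINISH);
        clen = sizeof(raw) - d.avail_out;
        deflateEnd(&d);

        n = raw_inflate(raw, clen, back, sizeof(back));
        printf("%.*s\n", n, back);      /* firmware */
        return 0;
    }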
4888
4889/* nic load/unload */
4890
4891/*
34f80b04 4892 * General service functions
a2fbb9ea
ET
4893 */
4894
4895/* send a NIG loopback debug packet */
4896static void bnx2x_lb_pckt(struct bnx2x *bp)
4897{
a2fbb9ea 4898 u32 wb_write[3];
a2fbb9ea
ET
4899
4900 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4901 wb_write[0] = 0x55555555;
4902 wb_write[1] = 0x55555555;
34f80b04 4903 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4904 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4905
4906 /* NON-IP protocol */
a2fbb9ea
ET
4907 wb_write[0] = 0x09000000;
4908 wb_write[1] = 0x55555555;
34f80b04 4909 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4910 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4911}
4912
4913/* some of the internal memories
4914 * are not directly readable from the driver;
4915 * to test them we send debug packets
4916 */
4917static int bnx2x_int_mem_test(struct bnx2x *bp)
4918{
4919 int factor;
4920 int count, i;
4921 u32 val = 0;
4922
ad8d3948 4923 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4924 factor = 120;
ad8d3948
EG
4925 else if (CHIP_REV_IS_EMUL(bp))
4926 factor = 200;
4927 else
a2fbb9ea 4928 factor = 1;
a2fbb9ea
ET
4929
4930 DP(NETIF_MSG_HW, "start part1\n");
4931
4932 /* Disable inputs of parser neighbor blocks */
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4937
4938 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4940
4941 /* send Ethernet packet */
4942 bnx2x_lb_pckt(bp);
4943
4944 /* TODO: do we need to reset the NIG statistics? */
4945 /* Wait until NIG register shows 1 packet of size 0x10 */
4946 count = 1000 * factor;
4947 while (count) {
34f80b04 4948
a2fbb9ea
ET
4949 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4950 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4951 if (val == 0x10)
4952 break;
4953
4954 msleep(10);
4955 count--;
4956 }
4957 if (val != 0x10) {
4958 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4959 return -1;
4960 }
4961
4962 /* Wait until PRS register shows 1 packet */
4963 count = 1000 * factor;
4964 while (count) {
4965 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4966 if (val == 1)
4967 break;
4968
4969 msleep(10);
4970 count--;
4971 }
4972 if (val != 0x1) {
4973 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4974 return -2;
4975 }
4976
4977 /* Reset and init BRB, PRS */
34f80b04 4978 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4979 msleep(50);
34f80b04 4980 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4981 msleep(50);
4982 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4983 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4984
4985 DP(NETIF_MSG_HW, "part2\n");
4986
4987 /* Disable inputs of parser neighbor blocks */
4988 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4989 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4990 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4991 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4992
4993 /* Write 0 to parser credits for CFC search request */
4994 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4995
4996 /* send 10 Ethernet packets */
4997 for (i = 0; i < 10; i++)
4998 bnx2x_lb_pckt(bp);
4999
5000 /* Wait until NIG register shows 10 + 1
5001 packets with a total size of 11*0x10 = 0xb0 */
5002 count = 1000 * factor;
5003 while (count) {
34f80b04 5004
a2fbb9ea
ET
5005 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5006 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5007 if (val == 0xb0)
5008 break;
5009
5010 msleep(10);
5011 count--;
5012 }
5013 if (val != 0xb0) {
5014 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5015 return -3;
5016 }
5017
5018 /* Wait until PRS register shows 2 packets */
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020 if (val != 2)
5021 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5022
5023 /* Write 1 to parser credits for CFC search request */
5024 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5025
5026 /* Wait until PRS register shows 3 packets */
5027 msleep(10 * factor);
5028 /* check that the PRS register now shows 3 packets */
5029 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5030 if (val != 3)
5031 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5032
5033 /* clear NIG EOP FIFO */
5034 for (i = 0; i < 11; i++)
5035 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5036 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5037 if (val != 1) {
5038 BNX2X_ERR("clear of NIG failed\n");
5039 return -4;
5040 }
5041
5042 /* Reset and init BRB, PRS, NIG */
5043 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5044 msleep(50);
5045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5046 msleep(50);
5047 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5048 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5049#ifndef BCM_ISCSI
5050 /* set NIC mode */
5051 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5052#endif
5053
5054 /* Enable inputs of parser neighbor blocks */
5055 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5056 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5057 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5058 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5059
5060 DP(NETIF_MSG_HW, "done\n");
5061
5062 return 0; /* OK */
5063}
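
bnx2x_int_mem_test() repeats one idiom throughout: scale the retry budget by a platform factor (FPGA and emulation run orders of magnitude slower than silicon), then poll-and-sleep until a register reaches the expected value or the budget runs out. A generic sketch of that helper with a stubbed register read:

    #include <stdio.h>
    #include <unistd.h>

    /* Stub for a register read; a real driver would touch hardware here. */
    static unsigned int reg_rd(void) { static unsigned int v; return ++v; }

    /* Poll until reg_rd() returns 'expect', sleeping 10ms per try.
     * 'factor' scales the budget the way CHIP_REV_IS_EMUL()/FPGA() do. */
    static int poll_reg(unsigned int expect, int factor)
    {
        int count = 1000 * factor;

        while (count--) {
            if (reg_rd() == expect)
                return 0;
            usleep(10 * 1000);          /* msleep(10) equivalent */
        }
        return -1;                      /* timed out */
    }

    int main(void)
    {
        printf("%d\n", poll_reg(5, 1)); /* 0: reached on the 5th read */
        return 0;
    }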

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}
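
/* Note on the masks above: writing 0 to a *_INT_MASK register unmasks
 * every attention source in that block.  The commented-out SEM/MISC
 * writes are deliberately left masked, and PBF keeps bits 3 and 4
 * masked via the 0x18 value.
 */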

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix PXP client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tmp = {0};

		tmp.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tmp)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tmp)[1]);
	}

	return 0;
}
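
/* Note on the TPA write above: the tstorm_eth_tpa_exist structure is
 * copied into TSTORM internal memory as two 32-bit register writes
 * (offset and offset + 4), since REG_WR() moves one u32 at a time;
 * only the tpa_exist field is actually set.
 */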

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE for MTU 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked; only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked; bits 0-2 are in use as in SF,
	 *    bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is a 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a 1=valid bit
 * added at the 53rd bit; then, since this is a wide register(TM),
 * we split it into two 32-bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
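
/* Worked example (illustrative values only): for a DMA address
 * addr = 0x123456789A000ULL,
 *	ONCHIP_ADDR1(addr) = 0x3456789A  (bits 12..43 of addr)
 *	ONCHIP_ADDR2(addr) = 0x100012    (bits 44 and up, plus the valid
 *					  bit at bit 20 of the high word,
 *					  i.e. bit 52 of the 64-bit value)
 * so the valid bit lands on the 53rd bit of the assembled entry.
 */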

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
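
/* The deliberate fall-through in the switch above gives nested init
 * scopes: a COMMON load runs common + port + function init, a PORT
 * load runs port + function init, and a FUNCTION load runs function
 * init only.
 */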

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
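
/* Typical call pattern (as used by the load/unload paths below):
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code)
 *		...MCP never echoed our sequence number in time...
 *
 * A zero return always means "no valid reply"; a valid reply carries
 * the FW_MSG_CODE_* response in the masked bits.
 */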

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate the searcher T2 table;
	   we allocate 1/4 of the alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fix up the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
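
/* bnx2x_alloc_mem() and bnx2x_free_mem() are strict mirrors: on any
 * allocation failure, alloc_mem_err unwinds by calling bnx2x_free_mem()
 * on the partially filled structure, which is safe because both free
 * macros skip NULL pointers.
 */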

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
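
/* MSI-X vector layout requested above: entry 0 is the slowpath
 * (default status block) vector, and entries 1..num_queues map the
 * fastpath queues, with IGU vector numbers offset by BP_L_ID().
 */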

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n",
				  i + offset, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	config->config_table[0].target_table_entry.flags = 0;
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	config->config_table[0].flags = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
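
/* Example of the CAM packing above, assuming a little-endian host and
 * dev_addr 00:11:22:33:44:55 -- *(u16 *)&dev_addr[0] reads 0x1100 and
 * swab16() turns it into msb_mac_addr = 0x0011; likewise the middle
 * and lsb words become 0x2233 and 0x4455, matching the
 * "(%04x:%04x:%04x)" debug output.
 */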

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if the index is different from 0,
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}
		mb(); /* state is changed by bnx2x_sp_event() */

		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}
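
/* Ramrod completion flow: bnx2x_sp_post() queues the command on the
 * slowpath ring, the state pointed to by state_p is updated from the
 * completion by bnx2x_sp_event(), and bnx2x_wait_ramrod() spins on
 * that state (polling the Rx ring itself when interrupts are not
 * available).
 */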

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Send the LOAD_REQUEST command to the MCP.
	   The reply tells us which type of LOAD to run:
	   if this is the first port to be initialized,
	   the common blocks should be initialized as well, otherwise not */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			return -EBUSY;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
			return -EBUSY; /* other port in diagnostic mode */

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* if we can't use MSI-X we only need one fp,
	 * so try to enable MSI-X with the requested number of fp's
	 * and fall back to INT#A with one fp
	 */
	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}
	DP(NETIF_MSG_IFUP,
	   "set number of queues to %d\n", bp->num_queues);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error;
		}
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed, aborting\n");
			goto load_error;
		}
	}

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_int_disable;
		}
	}

	bnx2x_stats_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* Enable Rx interrupt handling before sending the ramrod
	   as it's completed on the Rx FP queue */
	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));

	/* Enable interrupt handling */
	atomic_set(&bp->intr_sem, 0);

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_stop_netif;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_stop_netif;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp);
	else
		bnx2x_set_mac_addr_e1h(bp);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should only be re-enabled */
		netif_wake_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_start_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		if (bp->flags & USING_MSIX_FLAG)
			printk(KERN_INFO PFX "%s: using MSI-X\n",
			       bp->dev->name);
		break;

	case LOAD_DIAG:
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_stop_netif:
	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));

load_int_disable:
	bnx2x_int_disable_sync(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i,
					RX_SGE_CNT*NUM_RX_SGE_PAGES);
load_error:
	bnx2x_free_mem(bp);

	/* TBD: we really need to reset the chip
	   if we want to recover from this */
	return rc;
}
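
/* Error unwind order in bnx2x_nic_load(): load_stop_netif disables
 * NAPI first, load_int_disable then masks interrupts and releases the
 * IRQs and ring buffers, and load_error frees the descriptor memory
 * last -- the reverse of the setup order above.
 */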

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for the completion to arrive on the default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		msleep(1);
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
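
/* Note the asymmetry above: the HALT completion arrives on the
 * fastpath ring (hence bnx2x_wait_ramrod() with poll set), while
 * PORT_DEL completes on the default status block, so it is detected
 * by watching dsb_sp_prod advance.
 */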

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
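
/* The unload scope mirrors the load scope: UNLOAD_COMMON tears down
 * port + function + common blocks, UNLOAD_PORT tears down port +
 * function, and UNLOAD_FUNCTION only the function, matching the
 * FW_MSG_CODE_DRV_LOAD_* cases in bnx2x_init_hw().
 */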

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Wait until the tx fast path tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (BNX2X_HAS_TX_WORK(fp)) {

			if (!netif_running(bp->dev))
				bnx2x_tx_int(fp, 1000);

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}

	/* Give the HW time to discard old tx messages */
	msleep(1);

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
	/* Disable interrupts after Tx and Rx are disabled on stack level */
	bnx2x_int_disable_sync(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	/* Close multi and leading connections;
	   completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i,
					RX_SGE_CNT*NUM_RX_SGE_PAGES);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}
6774
a2fbb9ea
ET
6775/* end of nic load/unload */
6776
6777/* ethtool_ops */
6778
6779/*
6780 * Init service functions
6781 */
6782
34f80b04
EG
6783static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6784{
6785 u32 val;
6786
6787 /* Check if there is any driver already loaded */
6788 val = REG_RD(bp, MISC_REG_UNPREPARED);
6789 if (val == 0x1) {
6790 /* Check if it is the UNDI driver
6791 * UNDI driver initializes CID offset for normal bell to 0x7
6792 */
4a37fb66 6793 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6794 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6795 if (val == 0x7) {
6796 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6797 /* save our func */
34f80b04 6798 int func = BP_FUNC(bp);
da5a662a
VZ
6799 u32 swap_en;
6800 u32 swap_val;
34f80b04
EG
6801
6802 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6803
6804 /* try unload UNDI on port 0 */
6805 bp->func = 0;
da5a662a
VZ
6806 bp->fw_seq =
6807 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6808 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6809 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6810
6811 /* if UNDI is loaded on the other port */
6812 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6813
da5a662a
VZ
6814 /* send "DONE" for previous unload */
6815 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6816
6817 /* unload UNDI on port 1 */
34f80b04 6818 bp->func = 1;
6819 bp->fw_seq =
6820 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6821 DRV_MSG_SEQ_NUMBER_MASK);
6822 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6823
6824 bnx2x_fw_command(bp, reset_code);
6825 }
6826
6827 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6828 HC_REG_CONFIG_0), 0x1000);
6829
6830 /* close input traffic and wait for it */
6831 /* Do not rcv packets to BRB */
6832 REG_WR(bp,
6833 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6834 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6835 /* Do not direct rcv packets that are not for MCP to
6836 * the BRB */
6837 REG_WR(bp,
6838 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6839 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6840 /* clear AEU */
6841 REG_WR(bp,
6842 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6843 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6844 msleep(10);
6845
6846 /* save NIG port swap info */
6847 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6848 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6849 /* reset device */
6850 REG_WR(bp,
6851 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6852 0xd3ffffff);
6853 REG_WR(bp,
6854 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6855 0x1403);
6856 /* take the NIG out of reset and restore swap values */
6857 REG_WR(bp,
6858 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6859 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6860 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6861 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6862
6863 /* send unload done to the MCP */
6864 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6865
6866 /* restore our func and fw_seq */
6867 bp->func = func;
6868 bp->fw_seq =
6869 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6870 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6871 }
4a37fb66 6872 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6873 }
6874}
6875
6876static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6877{
6878 u32 val, val2, val3, val4, id;
6879
6880 /* Get the chip revision id and number. */
6881 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6882 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6883 id = ((val & 0xffff) << 16);
6884 val = REG_RD(bp, MISC_REG_CHIP_REV);
6885 id |= ((val & 0xf) << 12);
6886 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6887 id |= ((val & 0xff) << 4);
6888 val = REG_RD(bp, MISC_REG_BOND_ID);
6889 id |= (val & 0xf);
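 /* Illustrative example: chip num 0x164e, rev 0x1, metal 0x00 and
  * bond id 0x0 would pack to
  * (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x0 = 0x164e1000.
  */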
6890 bp->common.chip_id = id;
6891 bp->link_params.chip_id = bp->common.chip_id;
6892 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6893
6894 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6895 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6896 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6897 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6898 bp->common.flash_size, bp->common.flash_size);
6899
6900 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6901 bp->link_params.shmem_base = bp->common.shmem_base;
6902 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6903
6904 if (!bp->common.shmem_base ||
6905 (bp->common.shmem_base < 0xA0000) ||
6906 (bp->common.shmem_base >= 0xC0000)) {
6907 BNX2X_DEV_INFO("MCP not active\n");
6908 bp->flags |= NO_MCP_FLAG;
6909 return;
6910 }
6911
6912 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6913 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6914 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6915 BNX2X_ERR("BAD MCP validity signature\n");
6916
6917 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6918 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6919
6920 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6921 bp->common.hw_config, bp->common.board);
6922
6923 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6924 SHARED_HW_CFG_LED_MODE_MASK) >>
6925 SHARED_HW_CFG_LED_MODE_SHIFT);
6926
6927 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6928 bp->common.bc_ver = val;
6929 BNX2X_DEV_INFO("bc_ver %X\n", val);
6930 if (val < BNX2X_BC_VER) {
6931 /* for now only warn;
6932 * later we might need to enforce this */
6933 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6934 " please upgrade BC\n", BNX2X_BC_VER, val);
6935 }
6936 BNX2X_DEV_INFO("%sWoL Capable\n",
6937 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6938
6939 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6940 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6941 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6942 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6943
6944 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6945 val, val2, val3, val4);
6946}
6947
6948static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6949 u32 switch_cfg)
a2fbb9ea 6950{
34f80b04 6951 int port = BP_PORT(bp);
6952 u32 ext_phy_type;
6953
6954 switch (switch_cfg) {
6955 case SWITCH_CFG_1G:
6956 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6957
6958 ext_phy_type =
6959 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6960 switch (ext_phy_type) {
6961 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6962 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6963 ext_phy_type);
6964
6965 bp->port.supported |= (SUPPORTED_10baseT_Half |
6966 SUPPORTED_10baseT_Full |
6967 SUPPORTED_100baseT_Half |
6968 SUPPORTED_100baseT_Full |
6969 SUPPORTED_1000baseT_Full |
6970 SUPPORTED_2500baseX_Full |
6971 SUPPORTED_TP |
6972 SUPPORTED_FIBRE |
6973 SUPPORTED_Autoneg |
6974 SUPPORTED_Pause |
6975 SUPPORTED_Asym_Pause);
6976 break;
6977
6978 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6979 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6980 ext_phy_type);
6981
6982 bp->port.supported |= (SUPPORTED_10baseT_Half |
6983 SUPPORTED_10baseT_Full |
6984 SUPPORTED_100baseT_Half |
6985 SUPPORTED_100baseT_Full |
6986 SUPPORTED_1000baseT_Full |
6987 SUPPORTED_TP |
6988 SUPPORTED_FIBRE |
6989 SUPPORTED_Autoneg |
6990 SUPPORTED_Pause |
6991 SUPPORTED_Asym_Pause);
6992 break;
6993
6994 default:
6995 BNX2X_ERR("NVRAM config error. "
6996 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6997 bp->link_params.ext_phy_config);
6998 return;
6999 }
7000
7001 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7002 port*0x10);
7003 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7004 break;
7005
7006 case SWITCH_CFG_10G:
7007 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7008
7009 ext_phy_type =
7010 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7011 switch (ext_phy_type) {
7012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7013 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7014 ext_phy_type);
7015
7016 bp->port.supported |= (SUPPORTED_10baseT_Half |
7017 SUPPORTED_10baseT_Full |
7018 SUPPORTED_100baseT_Half |
7019 SUPPORTED_100baseT_Full |
7020 SUPPORTED_1000baseT_Full |
7021 SUPPORTED_2500baseX_Full |
7022 SUPPORTED_10000baseT_Full |
7023 SUPPORTED_TP |
7024 SUPPORTED_FIBRE |
7025 SUPPORTED_Autoneg |
7026 SUPPORTED_Pause |
7027 SUPPORTED_Asym_Pause);
7028 break;
7029
7030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7031 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7032 ext_phy_type);
f1410647 7033
7034 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7035 SUPPORTED_FIBRE |
7036 SUPPORTED_Pause |
7037 SUPPORTED_Asym_Pause);
7038 break;
7039
a2fbb9ea 7040 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7041 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7042 ext_phy_type);
7043
7044 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7045 SUPPORTED_1000baseT_Full |
7046 SUPPORTED_FIBRE |
7047 SUPPORTED_Pause |
7048 SUPPORTED_Asym_Pause);
7049 break;
7050
7051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7052 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7053 ext_phy_type);
7054
7055 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7056 SUPPORTED_1000baseT_Full |
7057 SUPPORTED_FIBRE |
7058 SUPPORTED_Autoneg |
7059 SUPPORTED_Pause |
7060 SUPPORTED_Asym_Pause);
7061 break;
7062
7063 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7064 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7065 ext_phy_type);
7066
7067 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7068 SUPPORTED_2500baseX_Full |
7069 SUPPORTED_1000baseT_Full |
7070 SUPPORTED_FIBRE |
7071 SUPPORTED_Autoneg |
7072 SUPPORTED_Pause |
7073 SUPPORTED_Asym_Pause);
7074 break;
7075
7076 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7077 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7078 ext_phy_type);
7079
7080 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7081 SUPPORTED_TP |
7082 SUPPORTED_Autoneg |
7083 SUPPORTED_Pause |
7084 SUPPORTED_Asym_Pause);
7085 break;
7086
7087 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7088 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7089 bp->link_params.ext_phy_config);
7090 break;
7091
7092 default:
7093 BNX2X_ERR("NVRAM config error. "
7094 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7095 bp->link_params.ext_phy_config);
7096 return;
7097 }
7098
7099 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7100 port*0x18);
7101 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7102
7103 break;
7104
7105 default:
7106 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7107 bp->port.link_config);
7108 return;
7109 }
34f80b04 7110 bp->link_params.phy_addr = bp->port.phy_addr;
7111
7112 /* mask what we support according to speed_cap_mask */
7113 if (!(bp->link_params.speed_cap_mask &
7114 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7115 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7116
7117 if (!(bp->link_params.speed_cap_mask &
7118 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7119 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7120
7121 if (!(bp->link_params.speed_cap_mask &
7122 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7123 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7124
7125 if (!(bp->link_params.speed_cap_mask &
7126 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7127 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7128
7129 if (!(bp->link_params.speed_cap_mask &
7130 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7131 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7132 SUPPORTED_1000baseT_Full);
a2fbb9ea 7133
7134 if (!(bp->link_params.speed_cap_mask &
7135 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7136 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7137
7138 if (!(bp->link_params.speed_cap_mask &
7139 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7140 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7141
34f80b04 7142 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7143}
7144
34f80b04 7145static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7146{
c18487ee 7147 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7148
34f80b04 7149 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7150 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7151 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7152 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7153 bp->port.advertising = bp->port.supported;
a2fbb9ea 7154 } else {
7155 u32 ext_phy_type =
7156 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7157
7158 if ((ext_phy_type ==
7159 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7160 (ext_phy_type ==
7161 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7162 /* force 10G, no AN */
c18487ee 7163 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7164 bp->port.advertising =
7165 (ADVERTISED_10000baseT_Full |
7166 ADVERTISED_FIBRE);
7167 break;
7168 }
7169 BNX2X_ERR("NVRAM config error. "
7170 "Invalid link_config 0x%x"
7171 " Autoneg not supported\n",
34f80b04 7172 bp->port.link_config);
7173 return;
7174 }
7175 break;
7176
7177 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7178 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7179 bp->link_params.req_line_speed = SPEED_10;
7180 bp->port.advertising = (ADVERTISED_10baseT_Full |
7181 ADVERTISED_TP);
7182 } else {
7183 BNX2X_ERR("NVRAM config error. "
7184 "Invalid link_config 0x%x"
7185 " speed_cap_mask 0x%x\n",
34f80b04 7186 bp->port.link_config,
c18487ee 7187 bp->link_params.speed_cap_mask);
7188 return;
7189 }
7190 break;
7191
7192 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7193 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7194 bp->link_params.req_line_speed = SPEED_10;
7195 bp->link_params.req_duplex = DUPLEX_HALF;
7196 bp->port.advertising = (ADVERTISED_10baseT_Half |
7197 ADVERTISED_TP);
7198 } else {
7199 BNX2X_ERR("NVRAM config error. "
7200 "Invalid link_config 0x%x"
7201 " speed_cap_mask 0x%x\n",
34f80b04 7202 bp->port.link_config,
c18487ee 7203 bp->link_params.speed_cap_mask);
7204 return;
7205 }
7206 break;
7207
7208 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7209 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7210 bp->link_params.req_line_speed = SPEED_100;
7211 bp->port.advertising = (ADVERTISED_100baseT_Full |
7212 ADVERTISED_TP);
7213 } else {
7214 BNX2X_ERR("NVRAM config error. "
7215 "Invalid link_config 0x%x"
7216 " speed_cap_mask 0x%x\n",
34f80b04 7217 bp->port.link_config,
c18487ee 7218 bp->link_params.speed_cap_mask);
7219 return;
7220 }
7221 break;
7222
7223 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7224 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7225 bp->link_params.req_line_speed = SPEED_100;
7226 bp->link_params.req_duplex = DUPLEX_HALF;
7227 bp->port.advertising = (ADVERTISED_100baseT_Half |
7228 ADVERTISED_TP);
7229 } else {
7230 BNX2X_ERR("NVRAM config error. "
7231 "Invalid link_config 0x%x"
7232 " speed_cap_mask 0x%x\n",
34f80b04 7233 bp->port.link_config,
c18487ee 7234 bp->link_params.speed_cap_mask);
7235 return;
7236 }
7237 break;
7238
7239 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7240 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7241 bp->link_params.req_line_speed = SPEED_1000;
7242 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7243 ADVERTISED_TP);
7244 } else {
7245 BNX2X_ERR("NVRAM config error. "
7246 "Invalid link_config 0x%x"
7247 " speed_cap_mask 0x%x\n",
34f80b04 7248 bp->port.link_config,
c18487ee 7249 bp->link_params.speed_cap_mask);
7250 return;
7251 }
7252 break;
7253
7254 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7255 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7256 bp->link_params.req_line_speed = SPEED_2500;
7257 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7258 ADVERTISED_TP);
7259 } else {
7260 BNX2X_ERR("NVRAM config error. "
7261 "Invalid link_config 0x%x"
7262 " speed_cap_mask 0x%x\n",
34f80b04 7263 bp->port.link_config,
c18487ee 7264 bp->link_params.speed_cap_mask);
7265 return;
7266 }
7267 break;
7268
7269 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7270 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7271 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7272 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7273 bp->link_params.req_line_speed = SPEED_10000;
7274 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7275 ADVERTISED_FIBRE);
7276 } else {
7277 BNX2X_ERR("NVRAM config error. "
7278 "Invalid link_config 0x%x"
7279 " speed_cap_mask 0x%x\n",
34f80b04 7280 bp->port.link_config,
c18487ee 7281 bp->link_params.speed_cap_mask);
7282 return;
7283 }
7284 break;
7285
7286 default:
7287 BNX2X_ERR("NVRAM config error. "
7288 "BAD link speed link_config 0x%x\n",
34f80b04 7289 bp->port.link_config);
c18487ee 7290 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7291 bp->port.advertising = bp->port.supported;
7292 break;
7293 }
a2fbb9ea 7294
7295 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7296 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7297 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7298 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7299 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7300
c18487ee 7301 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7302 " advertising 0x%x\n",
7303 bp->link_params.req_line_speed,
7304 bp->link_params.req_duplex,
34f80b04 7305 bp->link_params.req_flow_ctrl, bp->port.advertising);
7306}
7307
34f80b04 7308static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7309{
7310 int port = BP_PORT(bp);
7311 u32 val, val2;
a2fbb9ea 7312
c18487ee 7313 bp->link_params.bp = bp;
34f80b04 7314 bp->link_params.port = port;
c18487ee 7315
c18487ee 7316 bp->link_params.serdes_config =
f1410647 7317 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7318 bp->link_params.lane_config =
a2fbb9ea 7319 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7320 bp->link_params.ext_phy_config =
7321 SHMEM_RD(bp,
7322 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7323 bp->link_params.speed_cap_mask =
7324 SHMEM_RD(bp,
7325 dev_info.port_hw_config[port].speed_capability_mask);
7326
34f80b04 7327 bp->port.link_config =
7328 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7329
7330 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7331 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7332 " link_config 0x%08x\n",
7333 bp->link_params.serdes_config,
7334 bp->link_params.lane_config,
7335 bp->link_params.ext_phy_config,
34f80b04 7336 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7337
34f80b04 7338 bp->link_params.switch_cfg = (bp->port.link_config &
7339 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7340 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7341
7342 bnx2x_link_settings_requested(bp);
7343
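 /* The port MAC is kept in shmem as two words: mac_upper supplies
  * bytes 0-1 and mac_lower bytes 2-5, most significant byte first;
  * the shifts below unpack them into dev_addr[].
  */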
7344 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7345 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7346 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7347 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7348 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7349 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7350 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7351 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7352 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7353 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7354}
7355
7356static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7357{
7358 int func = BP_FUNC(bp);
7359 u32 val, val2;
7360 int rc = 0;
a2fbb9ea 7361
34f80b04 7362 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7363
7364 bp->e1hov = 0;
7365 bp->e1hmf = 0;
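 /* E1H only: a valid E1HOV tag in the function's mf_cfg selects
  * multi-function mode; without one, only virtual function 0 is
  * usable, hence the -EPERM for any other VN below.
  */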
7366 if (CHIP_IS_E1H(bp)) {
7367 bp->mf_config =
7368 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7369
7370 val =
7371 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7372 FUNC_MF_CFG_E1HOV_TAG_MASK);
7373 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7374
7375 bp->e1hov = val;
7376 bp->e1hmf = 1;
7377 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7378 "(0x%04x)\n",
7379 func, bp->e1hov, bp->e1hov);
7380 } else {
7381 BNX2X_DEV_INFO("Single function mode\n");
7382 if (BP_E1HVN(bp)) {
7383 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7384 " aborting\n", func);
7385 rc = -EPERM;
7386 }
7387 }
7388 }
a2fbb9ea 7389
7390 if (!BP_NOMCP(bp)) {
7391 bnx2x_get_port_hwinfo(bp);
7392
7393 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7394 DRV_MSG_SEQ_NUMBER_MASK);
7395 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7396 }
7397
7398 if (IS_E1HMF(bp)) {
7399 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7400 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7401 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7402 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7403 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7404 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7405 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7406 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7407 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7408 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7409 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7410 ETH_ALEN);
7411 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7412 ETH_ALEN);
a2fbb9ea 7413 }
7414
7415 return rc;
7416 }
7417
7418 if (BP_NOMCP(bp)) {
7419 /* only supposed to happen on emulation/FPGA */
7420 BNX2X_ERR("warning: random MAC workaround active\n");
7421 random_ether_addr(bp->dev->dev_addr);
7422 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7423 }
a2fbb9ea 7424
7425 return rc;
7426}
7427
7428static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7429{
7430 int func = BP_FUNC(bp);
7431 int rc;
7432
7433 /* Disable interrupt handling until HW is initialized */
7434 atomic_set(&bp->intr_sem, 1);
7435
34f80b04 7436 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7437
7438 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7439 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7440
7441 rc = bnx2x_get_hwinfo(bp);
7442
7443 /* need to reset chip if undi was active */
7444 if (!BP_NOMCP(bp))
7445 bnx2x_undi_unload(bp);
7446
7447 if (CHIP_REV_IS_FPGA(bp))
7448 printk(KERN_ERR PFX "FPGA detected\n");
7449
7450 if (BP_NOMCP(bp) && (func == 0))
7451 printk(KERN_ERR PFX
7452 "MCP disabled, must load devices in order!\n");
7453
7454 /* Set TPA flags */
7455 if (disable_tpa) {
7456 bp->flags &= ~TPA_ENABLE_FLAG;
7457 bp->dev->features &= ~NETIF_F_LRO;
7458 } else {
7459 bp->flags |= TPA_ENABLE_FLAG;
7460 bp->dev->features |= NETIF_F_LRO;
7461 }
7462
7463
7464 bp->tx_ring_size = MAX_TX_AVAIL;
7465 bp->rx_ring_size = MAX_RX_AVAIL;
7466
7467 bp->rx_csum = 1;
7468 bp->rx_offset = 0;
7469
7470 bp->tx_ticks = 50;
7471 bp->rx_ticks = 25;
7472
7473 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7474 bp->current_interval = (poll ? poll : bp->timer_interval);
7475
7476 init_timer(&bp->timer);
7477 bp->timer.expires = jiffies + bp->current_interval;
7478 bp->timer.data = (unsigned long) bp;
7479 bp->timer.function = bnx2x_timer;
7480
7481 return rc;
7482}
7483
7484/*
7485 * ethtool service functions
7486 */
7487
7488/* All ethtool functions called with rtnl_lock */
7489
7490static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7491{
7492 struct bnx2x *bp = netdev_priv(dev);
7493
7494 cmd->supported = bp->port.supported;
7495 cmd->advertising = bp->port.advertising;
7496
7497 if (netif_carrier_ok(dev)) {
7498 cmd->speed = bp->link_vars.line_speed;
7499 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7500 } else {
7501 cmd->speed = bp->link_params.req_line_speed;
7502 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7503 }
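 /* In multi-function mode the reported speed is additionally
  * clamped to this function's bandwidth limit; MAX_BW is stored
  * in units of 100 Mbps, so e.g. a field value of 25 caps the
  * reported speed at 2500.
  */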
7504 if (IS_E1HMF(bp)) {
7505 u16 vn_max_rate;
7506
7507 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7508 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7509 if (vn_max_rate < cmd->speed)
7510 cmd->speed = vn_max_rate;
7511 }
a2fbb9ea 7512
7513 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7514 u32 ext_phy_type =
7515 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7516
7517 switch (ext_phy_type) {
7518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7519 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7521 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7522 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7523 cmd->port = PORT_FIBRE;
7524 break;
7525
7526 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7527 cmd->port = PORT_TP;
7528 break;
7529
7530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7531 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7532 bp->link_params.ext_phy_config);
7533 break;
7534
7535 default:
7536 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7537 bp->link_params.ext_phy_config);
7538 break;
7539 }
7540 } else
a2fbb9ea 7541 cmd->port = PORT_TP;
a2fbb9ea 7542
34f80b04 7543 cmd->phy_address = bp->port.phy_addr;
7544 cmd->transceiver = XCVR_INTERNAL;
7545
c18487ee 7546 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7547 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7548 else
a2fbb9ea 7549 cmd->autoneg = AUTONEG_DISABLE;
7550
7551 cmd->maxtxpkt = 0;
7552 cmd->maxrxpkt = 0;
7553
7554 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7555 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7556 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7557 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7558 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7559 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7560 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7561
7562 return 0;
7563}
7564
7565static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7566{
7567 struct bnx2x *bp = netdev_priv(dev);
7568 u32 advertising;
7569
7570 if (IS_E1HMF(bp))
7571 return 0;
7572
7573 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7574 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7575 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7576 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7577 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7578 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7579 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7580
a2fbb9ea 7581 if (cmd->autoneg == AUTONEG_ENABLE) {
7582 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7583 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7584 return -EINVAL;
f1410647 7585 }
7586
7587 /* advertise the requested speed and duplex if supported */
34f80b04 7588 cmd->advertising &= bp->port.supported;
a2fbb9ea 7589
7590 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7591 bp->link_params.req_duplex = DUPLEX_FULL;
7592 bp->port.advertising |= (ADVERTISED_Autoneg |
7593 cmd->advertising);
7594
7595 } else { /* forced speed */
7596 /* advertise the requested speed and duplex if supported */
7597 switch (cmd->speed) {
7598 case SPEED_10:
7599 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7600 if (!(bp->port.supported &
7601 SUPPORTED_10baseT_Full)) {
7602 DP(NETIF_MSG_LINK,
7603 "10M full not supported\n");
a2fbb9ea 7604 return -EINVAL;
f1410647 7605 }
7606
7607 advertising = (ADVERTISED_10baseT_Full |
7608 ADVERTISED_TP);
7609 } else {
34f80b04 7610 if (!(bp->port.supported &
7611 SUPPORTED_10baseT_Half)) {
7612 DP(NETIF_MSG_LINK,
7613 "10M half not supported\n");
a2fbb9ea 7614 return -EINVAL;
f1410647 7615 }
7616
7617 advertising = (ADVERTISED_10baseT_Half |
7618 ADVERTISED_TP);
7619 }
7620 break;
7621
7622 case SPEED_100:
7623 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7624 if (!(bp->port.supported &
7625 SUPPORTED_100baseT_Full)) {
7626 DP(NETIF_MSG_LINK,
7627 "100M full not supported\n");
a2fbb9ea 7628 return -EINVAL;
f1410647 7629 }
7630
7631 advertising = (ADVERTISED_100baseT_Full |
7632 ADVERTISED_TP);
7633 } else {
34f80b04 7634 if (!(bp->port.supported &
7635 SUPPORTED_100baseT_Half)) {
7636 DP(NETIF_MSG_LINK,
7637 "100M half not supported\n");
a2fbb9ea 7638 return -EINVAL;
f1410647 7639 }
7640
7641 advertising = (ADVERTISED_100baseT_Half |
7642 ADVERTISED_TP);
7643 }
7644 break;
7645
7646 case SPEED_1000:
7647 if (cmd->duplex != DUPLEX_FULL) {
7648 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7649 return -EINVAL;
f1410647 7650 }
a2fbb9ea 7651
34f80b04 7652 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7653 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7654 return -EINVAL;
f1410647 7655 }
7656
7657 advertising = (ADVERTISED_1000baseT_Full |
7658 ADVERTISED_TP);
7659 break;
7660
7661 case SPEED_2500:
7662 if (cmd->duplex != DUPLEX_FULL) {
7663 DP(NETIF_MSG_LINK,
7664 "2.5G half not supported\n");
a2fbb9ea 7665 return -EINVAL;
f1410647 7666 }
a2fbb9ea 7667
34f80b04 7668 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7669 DP(NETIF_MSG_LINK,
7670 "2.5G full not supported\n");
a2fbb9ea 7671 return -EINVAL;
f1410647 7672 }
a2fbb9ea 7673
f1410647 7674 advertising = (ADVERTISED_2500baseX_Full |
7675 ADVERTISED_TP);
7676 break;
7677
7678 case SPEED_10000:
7679 if (cmd->duplex != DUPLEX_FULL) {
7680 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7681 return -EINVAL;
f1410647 7682 }
a2fbb9ea 7683
34f80b04 7684 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7685 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7686 return -EINVAL;
f1410647 7687 }
7688
7689 advertising = (ADVERTISED_10000baseT_Full |
7690 ADVERTISED_FIBRE);
7691 break;
7692
7693 default:
f1410647 7694 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7695 return -EINVAL;
7696 }
7697
7698 bp->link_params.req_line_speed = cmd->speed;
7699 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7700 bp->port.advertising = advertising;
7701 }
7702
c18487ee 7703 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7704 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7705 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7706 bp->port.advertising);
a2fbb9ea 7707
34f80b04 7708 if (netif_running(dev)) {
bb2a0f7a 7709 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7710 bnx2x_link_set(bp);
7711 }
7712
7713 return 0;
7714}
7715
7716#define PHY_FW_VER_LEN 10
7717
7718static void bnx2x_get_drvinfo(struct net_device *dev,
7719 struct ethtool_drvinfo *info)
7720{
7721 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7722 char phy_fw_ver[PHY_FW_VER_LEN];
7723
7724 strcpy(info->driver, DRV_MODULE_NAME);
7725 strcpy(info->version, DRV_MODULE_VERSION);
7726
7727 phy_fw_ver[0] = '\0';
34f80b04 7728 if (bp->port.pmf) {
4a37fb66 7729 bnx2x_acquire_phy_lock(bp);
7730 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7731 (bp->state != BNX2X_STATE_CLOSED),
7732 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7733 bnx2x_release_phy_lock(bp);
34f80b04 7734 }
7735
7736 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7737 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7738 BCM_5710_FW_REVISION_VERSION,
34f80b04 7739 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7740 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7741 strcpy(info->bus_info, pci_name(bp->pdev));
7742 info->n_stats = BNX2X_NUM_STATS;
7743 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7744 info->eedump_len = bp->common.flash_size;
7745 info->regdump_len = 0;
7746}
7747
7748static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7749{
7750 struct bnx2x *bp = netdev_priv(dev);
7751
7752 if (bp->flags & NO_WOL_FLAG) {
7753 wol->supported = 0;
7754 wol->wolopts = 0;
7755 } else {
7756 wol->supported = WAKE_MAGIC;
7757 if (bp->wol)
7758 wol->wolopts = WAKE_MAGIC;
7759 else
7760 wol->wolopts = 0;
7761 }
7762 memset(&wol->sopass, 0, sizeof(wol->sopass));
7763}
7764
7765static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7766{
7767 struct bnx2x *bp = netdev_priv(dev);
7768
7769 if (wol->wolopts & ~WAKE_MAGIC)
7770 return -EINVAL;
7771
7772 if (wol->wolopts & WAKE_MAGIC) {
7773 if (bp->flags & NO_WOL_FLAG)
7774 return -EINVAL;
7775
7776 bp->wol = 1;
34f80b04 7777 } else
a2fbb9ea 7778 bp->wol = 0;
34f80b04 7779
7780 return 0;
7781}
7782
7783static u32 bnx2x_get_msglevel(struct net_device *dev)
7784{
7785 struct bnx2x *bp = netdev_priv(dev);
7786
7787 return bp->msglevel;
7788}
7789
7790static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7791{
7792 struct bnx2x *bp = netdev_priv(dev);
7793
7794 if (capable(CAP_NET_ADMIN))
7795 bp->msglevel = level;
7796}
7797
7798static int bnx2x_nway_reset(struct net_device *dev)
7799{
7800 struct bnx2x *bp = netdev_priv(dev);
7801
7802 if (!bp->port.pmf)
7803 return 0;
a2fbb9ea 7804
34f80b04 7805 if (netif_running(dev)) {
bb2a0f7a 7806 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7807 bnx2x_link_set(bp);
7808 }
7809
7810 return 0;
7811}
7812
7813static int bnx2x_get_eeprom_len(struct net_device *dev)
7814{
7815 struct bnx2x *bp = netdev_priv(dev);
7816
34f80b04 7817 return bp->common.flash_size;
7818}
7819
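/* NVRAM access pattern used by the helpers below: claim the
 * per-port software arbiter (MCPR_NVM_SW_ARB) and poll until the
 * hardware acknowledges the ARB bit, enable the access-enable
 * bits for the transfer proper, then undo both on the way out.
 */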
7820static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7821{
34f80b04 7822 int port = BP_PORT(bp);
7823 int count, i;
7824 u32 val = 0;
7825
7826 /* adjust timeout for emulation/FPGA */
7827 count = NVRAM_TIMEOUT_COUNT;
7828 if (CHIP_REV_IS_SLOW(bp))
7829 count *= 100;
7830
7831 /* request access to nvram interface */
7832 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7833 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7834
7835 for (i = 0; i < count*10; i++) {
7836 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7837 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7838 break;
7839
7840 udelay(5);
7841 }
7842
7843 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7844 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7845 return -EBUSY;
7846 }
7847
7848 return 0;
7849}
7850
7851static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7852{
34f80b04 7853 int port = BP_PORT(bp);
7854 int count, i;
7855 u32 val = 0;
7856
7857 /* adjust timeout for emulation/FPGA */
7858 count = NVRAM_TIMEOUT_COUNT;
7859 if (CHIP_REV_IS_SLOW(bp))
7860 count *= 100;
7861
7862 /* relinquish nvram interface */
7863 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7864 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7865
7866 for (i = 0; i < count*10; i++) {
7867 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7868 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7869 break;
7870
7871 udelay(5);
7872 }
7873
7874 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7875 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7876 return -EBUSY;
7877 }
7878
7879 return 0;
7880}
7881
7882static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7883{
7884 u32 val;
7885
7886 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7887
7888 /* enable both bits, even on read */
7889 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7890 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7891 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7892}
7893
7894static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7895{
7896 u32 val;
7897
7898 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7899
7900 /* disable both bits, even after read */
7901 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7902 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7903 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7904}
7905
7906static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7907 u32 cmd_flags)
7908{
f1410647 7909 int count, i, rc;
7910 u32 val;
7911
7912 /* build the command word */
7913 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7914
7915 /* need to clear DONE bit separately */
7916 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7917
7918 /* address of the NVRAM to read from */
7919 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7920 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7921
7922 /* issue a read command */
7923 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7924
7925 /* adjust timeout for emulation/FPGA */
7926 count = NVRAM_TIMEOUT_COUNT;
7927 if (CHIP_REV_IS_SLOW(bp))
7928 count *= 100;
7929
7930 /* wait for completion */
7931 *ret_val = 0;
7932 rc = -EBUSY;
7933 for (i = 0; i < count; i++) {
7934 udelay(5);
7935 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7936
7937 if (val & MCPR_NVM_COMMAND_DONE) {
7938 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7939 /* we read nvram data in cpu order
7940 * but ethtool sees it as an array of bytes;
7941 * converting to big-endian will do the work */
7942 val = cpu_to_be32(val);
7943 *ret_val = val;
7944 rc = 0;
7945 break;
7946 }
7947 }
7948
7949 return rc;
7950}
7951
7952static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7953 int buf_size)
7954{
7955 int rc;
7956 u32 cmd_flags;
7957 u32 val;
7958
7959 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7960 DP(BNX2X_MSG_NVM,
c14423fe 7961 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7962 offset, buf_size);
7963 return -EINVAL;
7964 }
7965
7966 if (offset + buf_size > bp->common.flash_size) {
7967 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7968 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7969 offset, buf_size, bp->common.flash_size);
7970 return -EINVAL;
7971 }
7972
7973 /* request access to nvram interface */
7974 rc = bnx2x_acquire_nvram_lock(bp);
7975 if (rc)
7976 return rc;
7977
7978 /* enable access to nvram interface */
7979 bnx2x_enable_nvram_access(bp);
7980
7981 /* read the first word(s) */
7982 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7983 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7984 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7985 memcpy(ret_buf, &val, 4);
7986
7987 /* advance to the next dword */
7988 offset += sizeof(u32);
7989 ret_buf += sizeof(u32);
7990 buf_size -= sizeof(u32);
7991 cmd_flags = 0;
7992 }
7993
7994 if (rc == 0) {
7995 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7996 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7997 memcpy(ret_buf, &val, 4);
7998 }
7999
8000 /* disable access to nvram interface */
8001 bnx2x_disable_nvram_access(bp);
8002 bnx2x_release_nvram_lock(bp);
8003
8004 return rc;
8005}
8006
8007static int bnx2x_get_eeprom(struct net_device *dev,
8008 struct ethtool_eeprom *eeprom, u8 *eebuf)
8009{
8010 struct bnx2x *bp = netdev_priv(dev);
8011 int rc;
8012
34f80b04 8013 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8014 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8015 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8016 eeprom->len, eeprom->len);
8017
8018 /* parameters already validated in ethtool_get_eeprom */
8019
8020 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8021
8022 return rc;
8023}
8024
8025static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8026 u32 cmd_flags)
8027{
f1410647 8028 int count, i, rc;
8029
8030 /* build the command word */
8031 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8032
8033 /* need to clear DONE bit separately */
8034 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8035
8036 /* write the data */
8037 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8038
8039 /* address of the NVRAM to write to */
8040 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8041 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8042
8043 /* issue the write command */
8044 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8045
8046 /* adjust timeout for emulation/FPGA */
8047 count = NVRAM_TIMEOUT_COUNT;
8048 if (CHIP_REV_IS_SLOW(bp))
8049 count *= 100;
8050
8051 /* wait for completion */
8052 rc = -EBUSY;
8053 for (i = 0; i < count; i++) {
8054 udelay(5);
8055 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8056 if (val & MCPR_NVM_COMMAND_DONE) {
8057 rc = 0;
8058 break;
8059 }
8060 }
8061
8062 return rc;
8063}
8064
f1410647 8065#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
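/* Illustrative: BYTE_OFFSET(0x105) = 8 * (0x105 & 0x03) = 8, i.e.
 * that byte occupies bits 15:8 of the dword at aligned offset 0x104.
 */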
8066
8067static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8068 int buf_size)
8069{
8070 int rc;
8071 u32 cmd_flags;
8072 u32 align_offset;
8073 u32 val;
8074
8075 if (offset + buf_size > bp->common.flash_size) {
8076 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8077 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8078 offset, buf_size, bp->common.flash_size);
8079 return -EINVAL;
8080 }
8081
8082 /* request access to nvram interface */
8083 rc = bnx2x_acquire_nvram_lock(bp);
8084 if (rc)
8085 return rc;
8086
8087 /* enable access to nvram interface */
8088 bnx2x_enable_nvram_access(bp);
8089
8090 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8091 align_offset = (offset & ~0x03);
8092 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8093
8094 if (rc == 0) {
8095 val &= ~(0xff << BYTE_OFFSET(offset));
8096 val |= (*data_buf << BYTE_OFFSET(offset));
8097
8098 /* nvram data is returned as an array of bytes
8099 * convert it back to cpu order */
8100 val = be32_to_cpu(val);
8101
8102 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8103 cmd_flags);
8104 }
8105
8106 /* disable access to nvram interface */
8107 bnx2x_disable_nvram_access(bp);
8108 bnx2x_release_nvram_lock(bp);
8109
8110 return rc;
8111}
8112
8113static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8114 int buf_size)
8115{
8116 int rc;
8117 u32 cmd_flags;
8118 u32 val;
8119 u32 written_so_far;
8120
34f80b04 8121 if (buf_size == 1) /* ethtool */
a2fbb9ea 8122 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8123
8124 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8125 DP(BNX2X_MSG_NVM,
c14423fe 8126 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8127 offset, buf_size);
8128 return -EINVAL;
8129 }
8130
8131 if (offset + buf_size > bp->common.flash_size) {
8132 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8133 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8134 offset, buf_size, bp->common.flash_size);
8135 return -EINVAL;
8136 }
8137
8138 /* request access to nvram interface */
8139 rc = bnx2x_acquire_nvram_lock(bp);
8140 if (rc)
8141 return rc;
8142
8143 /* enable access to nvram interface */
8144 bnx2x_enable_nvram_access(bp);
8145
8146 written_so_far = 0;
8147 cmd_flags = MCPR_NVM_COMMAND_FIRST;
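 /* FIRST/LAST bracket each NVRAM page: LAST is raised for the
  * final dword of the buffer or at a page boundary, and FIRST is
  * raised again when a new page starts.
  */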
8148 while ((written_so_far < buf_size) && (rc == 0)) {
8149 if (written_so_far == (buf_size - sizeof(u32)))
8150 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8151 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8152 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8153 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8154 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8155
8156 memcpy(&val, data_buf, 4);
8157
8158 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8159
8160 /* advance to the next dword */
8161 offset += sizeof(u32);
8162 data_buf += sizeof(u32);
8163 written_so_far += sizeof(u32);
8164 cmd_flags = 0;
8165 }
8166
8167 /* disable access to nvram interface */
8168 bnx2x_disable_nvram_access(bp);
8169 bnx2x_release_nvram_lock(bp);
8170
8171 return rc;
8172}
8173
8174static int bnx2x_set_eeprom(struct net_device *dev,
8175 struct ethtool_eeprom *eeprom, u8 *eebuf)
8176{
8177 struct bnx2x *bp = netdev_priv(dev);
8178 int rc;
8179
34f80b04 8180 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8181 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8182 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8183 eeprom->len, eeprom->len);
8184
8185 /* parameters already validated in ethtool_set_eeprom */
8186
c18487ee 8187 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8188 if (eeprom->magic == 0x00504859)
8189 if (bp->port.pmf) {
8190
4a37fb66 8191 bnx2x_acquire_phy_lock(bp);
8192 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8193 bp->link_params.ext_phy_config,
8194 (bp->state != BNX2X_STATE_CLOSED),
8195 eebuf, eeprom->len);
8196 if ((bp->state == BNX2X_STATE_OPEN) ||
8197 (bp->state == BNX2X_STATE_DISABLED)) {
8198 rc |= bnx2x_link_reset(&bp->link_params,
8199 &bp->link_vars);
8200 rc |= bnx2x_phy_init(&bp->link_params,
8201 &bp->link_vars);
bb2a0f7a 8202 }
4a37fb66 8203 bnx2x_release_phy_lock(bp);
8204
8205 } else /* Only the PMF can access the PHY */
8206 return -EINVAL;
8207 else
c18487ee 8208 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8209
8210 return rc;
8211}
8212
8213static int bnx2x_get_coalesce(struct net_device *dev,
8214 struct ethtool_coalesce *coal)
8215{
8216 struct bnx2x *bp = netdev_priv(dev);
8217
8218 memset(coal, 0, sizeof(struct ethtool_coalesce));
8219
8220 coal->rx_coalesce_usecs = bp->rx_ticks;
8221 coal->tx_coalesce_usecs = bp->tx_ticks;
8222
8223 return 0;
8224}
8225
8226static int bnx2x_set_coalesce(struct net_device *dev,
8227 struct ethtool_coalesce *coal)
8228{
8229 struct bnx2x *bp = netdev_priv(dev);
8230
8231 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8232 if (bp->rx_ticks > 3000)
8233 bp->rx_ticks = 3000;
8234
8235 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8236 if (bp->tx_ticks > 0x3000)
8237 bp->tx_ticks = 0x3000;
8238
34f80b04 8239 if (netif_running(dev))
8240 bnx2x_update_coalesce(bp);
8241
8242 return 0;
8243}
8244
8245static int bnx2x_set_flags(struct net_device *dev, u32 data)
8246{
8247 struct bnx2x *bp = netdev_priv(dev);
8248 int changed = 0;
8249 int rc = 0;
8250
8251 if (data & ETH_FLAG_LRO) {
8252 if (!(dev->features & NETIF_F_LRO)) {
8253 dev->features |= NETIF_F_LRO;
8254 bp->flags |= TPA_ENABLE_FLAG;
8255 changed = 1;
8256 }
8257
8258 } else if (dev->features & NETIF_F_LRO) {
8259 dev->features &= ~NETIF_F_LRO;
8260 bp->flags &= ~TPA_ENABLE_FLAG;
8261 changed = 1;
8262 }
8263
8264 if (changed && netif_running(dev)) {
8265 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8266 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8267 }
8268
8269 return rc;
8270}
8271
8272static void bnx2x_get_ringparam(struct net_device *dev,
8273 struct ethtool_ringparam *ering)
8274{
8275 struct bnx2x *bp = netdev_priv(dev);
8276
8277 ering->rx_max_pending = MAX_RX_AVAIL;
8278 ering->rx_mini_max_pending = 0;
8279 ering->rx_jumbo_max_pending = 0;
8280
8281 ering->rx_pending = bp->rx_ring_size;
8282 ering->rx_mini_pending = 0;
8283 ering->rx_jumbo_pending = 0;
8284
8285 ering->tx_max_pending = MAX_TX_AVAIL;
8286 ering->tx_pending = bp->tx_ring_size;
8287}
8288
8289static int bnx2x_set_ringparam(struct net_device *dev,
8290 struct ethtool_ringparam *ering)
8291{
8292 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8293 int rc = 0;
8294
8295 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8296 (ering->tx_pending > MAX_TX_AVAIL) ||
8297 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8298 return -EINVAL;
8299
8300 bp->rx_ring_size = ering->rx_pending;
8301 bp->tx_ring_size = ering->tx_pending;
8302
8303 if (netif_running(dev)) {
8304 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8305 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8306 }
8307
34f80b04 8308 return rc;
8309}
8310
8311static void bnx2x_get_pauseparam(struct net_device *dev,
8312 struct ethtool_pauseparam *epause)
8313{
8314 struct bnx2x *bp = netdev_priv(dev);
8315
8316 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8317 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8318
8319 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8320 FLOW_CTRL_RX);
8321 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8322 FLOW_CTRL_TX);
8323
8324 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8325 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8326 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8327}
8328
8329static int bnx2x_set_pauseparam(struct net_device *dev,
8330 struct ethtool_pauseparam *epause)
8331{
8332 struct bnx2x *bp = netdev_priv(dev);
8333
8334 if (IS_E1HMF(bp))
8335 return 0;
8336
8337 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8338 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8339 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8340
c18487ee 8341 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8342
f1410647 8343 if (epause->rx_pause)
8344 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8345
f1410647 8346 if (epause->tx_pause)
8347 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8348
8349 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8350 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8351
c18487ee 8352 if (epause->autoneg) {
34f80b04 8353 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8354 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8355 return -EINVAL;
8356 }
a2fbb9ea 8357
8358 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8359 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8360 }
a2fbb9ea 8361
8362 DP(NETIF_MSG_LINK,
8363 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8364
8365 if (netif_running(dev)) {
bb2a0f7a 8366 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8367 bnx2x_link_set(bp);
8368 }
8369
8370 return 0;
8371}
8372
8373static u32 bnx2x_get_rx_csum(struct net_device *dev)
8374{
8375 struct bnx2x *bp = netdev_priv(dev);
8376
8377 return bp->rx_csum;
8378}
8379
8380static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8381{
8382 struct bnx2x *bp = netdev_priv(dev);
8383
8384 bp->rx_csum = data;
8385 return 0;
8386}
8387
8388static int bnx2x_set_tso(struct net_device *dev, u32 data)
8389{
755735eb 8390 if (data) {
a2fbb9ea 8391 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8392 dev->features |= NETIF_F_TSO6;
8393 } else {
a2fbb9ea 8394 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8395 dev->features &= ~NETIF_F_TSO6;
8396 }
8397
8398 return 0;
8399}
8400
f3c87cdd 8401static const struct {
8402 char string[ETH_GSTRING_LEN];
8403} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8404 { "register_test (offline)" },
8405 { "memory_test (offline)" },
8406 { "loopback_test (offline)" },
8407 { "nvram_test (online)" },
8408 { "interrupt_test (online)" },
8409 { "link_test (online)" },
8410 { "idle check (online)" },
8411 { "MC errors (online)" }
8412};
8413
8414static int bnx2x_self_test_count(struct net_device *dev)
8415{
8416 return BNX2X_NUM_TESTS;
8417}
8418
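/* The register test below makes two passes over reg_tbl, writing
 * 0x00000000 and then 0xffffffff: offset0 is the port-0 register,
 * offset1 the per-port stride, and mask the bits expected to be
 * read back as written.
 */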
8419static int bnx2x_test_registers(struct bnx2x *bp)
8420{
8421 int idx, i, rc = -ENODEV;
8422 u32 wr_val = 0;
9dabc424 8423 int port = BP_PORT(bp);
8424 static const struct {
8425 u32 offset0;
8426 u32 offset1;
8427 u32 mask;
8428 } reg_tbl[] = {
8429/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8430 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8431 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8432 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8433 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8434 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8435 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8436 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8437 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8438 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8439/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8440 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8441 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8442 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8443 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8444 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8445 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8446 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8447 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8448 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8449/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8450 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8451 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8452 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8453 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8454 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8455 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8456 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8457 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8458 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8459/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8460 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8461 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8462 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8463 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8464 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8465 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8466 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8467
8468 { 0xffffffff, 0, 0x00000000 }
8469 };
8470
8471 if (!netif_running(bp->dev))
8472 return rc;
8473
8474 /* Repeat the test twice:
8475 First by writing 0x00000000, second by writing 0xffffffff */
8476 for (idx = 0; idx < 2; idx++) {
8477
8478 switch (idx) {
8479 case 0:
8480 wr_val = 0;
8481 break;
8482 case 1:
8483 wr_val = 0xffffffff;
8484 break;
8485 }
8486
8487 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8488 u32 offset, mask, save_val, val;
8489
8490 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8491 mask = reg_tbl[i].mask;
8492
8493 save_val = REG_RD(bp, offset);
8494
8495 REG_WR(bp, offset, wr_val);
8496 val = REG_RD(bp, offset);
8497
8498 /* Restore the original register's value */
8499 REG_WR(bp, offset, save_val);
8500
8501 /* verify that the value read back is as expected */
8502 if ((val & mask) != (wr_val & mask))
8503 goto test_reg_exit;
8504 }
8505 }
8506
8507 rc = 0;
8508
8509test_reg_exit:
8510 return rc;
8511}
8512
8513static int bnx2x_test_memory(struct bnx2x *bp)
8514{
8515 int i, j, rc = -ENODEV;
8516 u32 val;
8517 static const struct {
8518 u32 offset;
8519 int size;
8520 } mem_tbl[] = {
8521 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8522 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8523 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8524 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8525 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8526 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8527 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8528
8529 { 0xffffffff, 0 }
8530 };
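 /* After every memory has been read, each block's parity status
  * is checked; e1_mask/e1h_mask hide the parity bits that are
  * tolerated on E1 and E1H silicon respectively.
  */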
8531 static const struct {
8532 char *name;
8533 u32 offset;
8534 u32 e1_mask;
8535 u32 e1h_mask;
f3c87cdd 8536 } prty_tbl[] = {
8537 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8538 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8539 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8540 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8541 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8542 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8543
8544 { NULL, 0xffffffff, 0, 0 }
8545 };
8546
8547 if (!netif_running(bp->dev))
8548 return rc;
8549
8550 /* Go through all the memories */
8551 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8552 for (j = 0; j < mem_tbl[i].size; j++)
8553 REG_RD(bp, mem_tbl[i].offset + j*4);
8554
8555 /* Check the parity status */
8556 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8557 val = REG_RD(bp, prty_tbl[i].offset);
8558 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8559 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8560 DP(NETIF_MSG_HW,
8561 "%s is 0x%x\n", prty_tbl[i].name, val);
8562 goto test_mem_exit;
8563 }
8564 }
8565
8566 rc = 0;
8567
8568test_mem_exit:
8569 return rc;
8570}
8571
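/* intr_sem is the interrupt gate armed in bnx2x_init_bp(); only
 * the bnx2x_netif_start() call that brings it back to zero
 * re-enables interrupts and the NAPI queues.
 */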
8572static void bnx2x_netif_start(struct bnx2x *bp)
8573{
8574 int i;
8575
8576 if (atomic_dec_and_test(&bp->intr_sem)) {
8577 if (netif_running(bp->dev)) {
8578 bnx2x_int_enable(bp);
8579 for_each_queue(bp, i)
8580 napi_enable(&bnx2x_fp(bp, i, napi));
8581 if (bp->state == BNX2X_STATE_OPEN)
8582 netif_wake_queue(bp->dev);
8583 }
8584 }
8585}
8586
8587static void bnx2x_netif_stop(struct bnx2x *bp)
8588{
8589 int i;
8590
8591 if (netif_running(bp->dev)) {
8592 netif_tx_disable(bp->dev);
8593 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8594 for_each_queue(bp, i)
8595 napi_disable(&bnx2x_fp(bp, i, napi));
8596 }
8597 bnx2x_int_disable_sync(bp);
8598}
8599
8600static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8601{
8602 int cnt = 1000;
8603
8604 if (link_up)
8605 while (bnx2x_link_test(bp) && cnt--)
8606 msleep(10);
8607}
8608
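/* Loopback self-test in outline: put the (B)MAC or XGXS into
 * loopback, hand-build one 1514-byte frame on fastpath queue 0,
 * post it and ring the doorbell, then verify that exactly one
 * packet advanced both the Tx and Rx consumers with its payload
 * intact.
 */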
8609static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8610{
8611 unsigned int pkt_size, num_pkts, i;
8612 struct sk_buff *skb;
8613 unsigned char *packet;
8614 struct bnx2x_fastpath *fp = &bp->fp[0];
8615 u16 tx_start_idx, tx_idx;
8616 u16 rx_start_idx, rx_idx;
8617 u16 pkt_prod;
8618 struct sw_tx_bd *tx_buf;
8619 struct eth_tx_bd *tx_bd;
8620 dma_addr_t mapping;
8621 union eth_rx_cqe *cqe;
8622 u8 cqe_fp_flags;
8623 struct sw_rx_bd *rx_buf;
8624 u16 len;
8625 int rc = -ENODEV;
8626
8627 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8628 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8629 bnx2x_acquire_phy_lock(bp);
8630 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8631 bnx2x_release_phy_lock(bp);
8632
8633 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8634 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8635 bnx2x_acquire_phy_lock(bp);
8636 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8637 bnx2x_release_phy_lock(bp);
8638 /* wait until link state is restored */
8639 bnx2x_wait_for_link(bp, link_up);
8640
8641 } else
8642 return -EINVAL;
8643
8644 pkt_size = 1514;
8645 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8646 if (!skb) {
8647 rc = -ENOMEM;
8648 goto test_loopback_exit;
8649 }
8650 packet = skb_put(skb, pkt_size);
8651 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8652 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8653 for (i = ETH_HLEN; i < pkt_size; i++)
8654 packet[i] = (unsigned char) (i & 0xff);
8655
8656 num_pkts = 0;
8657 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8658 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8659
8660 pkt_prod = fp->tx_pkt_prod++;
8661 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8662 tx_buf->first_bd = fp->tx_bd_prod;
8663 tx_buf->skb = skb;
8664
8665 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8666 mapping = pci_map_single(bp->pdev, skb->data,
8667 skb_headlen(skb), PCI_DMA_TODEVICE);
8668 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8669 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8670 tx_bd->nbd = cpu_to_le16(1);
8671 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8672 tx_bd->vlan = cpu_to_le16(pkt_prod);
8673 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8674 ETH_TX_BD_FLAGS_END_BD);
8675 tx_bd->general_data = ((UNICAST_ADDRESS <<
8676 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8677
8678 fp->hw_tx_prods->bds_prod =
8679 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8680 mb(); /* FW restriction: must not reorder writing nbd and packets */
8681 fp->hw_tx_prods->packets_prod =
8682 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8683 DOORBELL(bp, FP_IDX(fp), 0);
8684
8685 mmiowb();
8686
8687 num_pkts++;
8688 fp->tx_bd_prod++;
8689 bp->dev->trans_start = jiffies;
8690
8691 udelay(100);
8692
8693 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8694 if (tx_idx != tx_start_idx + num_pkts)
8695 goto test_loopback_exit;
8696
8697 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8698 if (rx_idx != rx_start_idx + num_pkts)
8699 goto test_loopback_exit;
8700
8701 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8702 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8703 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8704 goto test_loopback_rx_exit;
8705
8706 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8707 if (len != pkt_size)
8708 goto test_loopback_rx_exit;
8709
8710 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8711 skb = rx_buf->skb;
8712 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8713 for (i = ETH_HLEN; i < pkt_size; i++)
8714 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8715 goto test_loopback_rx_exit;
8716
8717 rc = 0;
8718
8719test_loopback_rx_exit:
8720 bp->dev->last_rx = jiffies;
8721
8722 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8723 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8724 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8725 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8726
8727 /* Update producers */
8728 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8729 fp->rx_sge_prod);
8730 mmiowb(); /* keep prod updates ordered */
8731
8732test_loopback_exit:
8733 bp->link_params.loopback_mode = LOOPBACK_NONE;
8734
8735 return rc;
8736}
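
/*
 * Illustrative sketch, not part of the driver: the loopback payload is
 * stamped with the low byte of each offset and the echoed frame is checked
 * the same way, so any shift or corruption shows up.  Hypothetical helper:
 */
static inline int example_payload_ok(const unsigned char *buf,
				     unsigned int hlen, unsigned int len)
{
	unsigned int i;

	for (i = hlen; i < len; i++)
		if (buf[i] != (unsigned char)(i & 0xff))
			return 0;	/* mismatch at offset i */
	return 1;
}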
8737
8738static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8739{
8740 int rc = 0;
8741
8742 if (!netif_running(bp->dev))
8743 return BNX2X_LOOPBACK_FAILED;
8744
8745 bnx2x_netif_stop(bp);
8746
8747 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8748 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8749 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8750 }
8751
8752 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8753 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8754 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8755 }
8756
8757 bnx2x_netif_start(bp);
8758
8759 return rc;
8760}
8761
8762#define CRC32_RESIDUAL 0xdebb20e3
8763
8764static int bnx2x_test_nvram(struct bnx2x *bp)
8765{
8766 static const struct {
8767 int offset;
8768 int size;
8769 } nvram_tbl[] = {
8770 { 0, 0x14 }, /* bootstrap */
8771 { 0x14, 0xec }, /* dir */
8772 { 0x100, 0x350 }, /* manuf_info */
8773 { 0x450, 0xf0 }, /* feature_info */
8774 { 0x640, 0x64 }, /* upgrade_key_info */
8775 { 0x6a4, 0x64 },
8776 { 0x708, 0x70 }, /* manuf_key_info */
8777 { 0x778, 0x70 },
8778 { 0, 0 }
8779 };
8780 u32 buf[0x350 / 4];
8781 u8 *data = (u8 *)buf;
8782 int i, rc;
8783 u32 magic, csum;
8784
8785 rc = bnx2x_nvram_read(bp, 0, data, 4);
8786 if (rc) {
8787 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8788 goto test_nvram_exit;
8789 }
8790
8791 magic = be32_to_cpu(buf[0]);
8792 if (magic != 0x669955aa) {
8793 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8794 rc = -ENODEV;
8795 goto test_nvram_exit;
8796 }
8797
8798 for (i = 0; nvram_tbl[i].size; i++) {
8799
8800 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8801 nvram_tbl[i].size);
8802 if (rc) {
8803 DP(NETIF_MSG_PROBE,
8804 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8805 goto test_nvram_exit;
8806 }
8807
8808 csum = ether_crc_le(nvram_tbl[i].size, data);
8809 if (csum != CRC32_RESIDUAL) {
8810 DP(NETIF_MSG_PROBE,
8811 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8812 rc = -ENODEV;
8813 goto test_nvram_exit;
8814 }
8815 }
8816
8817test_nvram_exit:
8818 return rc;
8819}
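
/*
 * Why comparing against CRC32_RESIDUAL works (sketch, not part of the
 * driver): each NVRAM region carries its little-endian CRC32 in its last
 * four bytes, and running the same CRC over "data plus stored CRC" always
 * leaves the fixed residue 0xdebb20e3 (= ~0x2144df1c) in the register, so
 * no per-region expected value is needed.  A minimal bitwise equivalent of
 * ether_crc_le() (hypothetical helper, no final inversion):
 */
static u32 example_crc32_le(const u8 *buf, int len)
{
	u32 crc = ~0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}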
8820
8821static int bnx2x_test_intr(struct bnx2x *bp)
8822{
8823 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8824 int i, rc;
8825
8826 if (!netif_running(bp->dev))
8827 return -ENODEV;
8828
8829 config->hdr.length_6b = 0;
8830 config->hdr.offset = 0;
8831 config->hdr.client_id = BP_CL_ID(bp);
8832 config->hdr.reserved1 = 0;
8833
8834 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8835 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8836 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8837 if (rc == 0) {
8838 bp->set_mac_pending++;
8839 for (i = 0; i < 10; i++) {
8840 if (!bp->set_mac_pending)
8841 break;
8842 msleep_interruptible(10);
8843 }
8844 if (i == 10)
8845 rc = -ENODEV;
8846 }
8847
8848 return rc;
8849}
8850
8851static void bnx2x_self_test(struct net_device *dev,
8852 struct ethtool_test *etest, u64 *buf)
8853{
8854 struct bnx2x *bp = netdev_priv(dev);
8855
8856 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8857
8858 if (!netif_running(dev))
8859 return;
8860
8861 /* offline tests are not supported in MF mode */
8862 if (IS_E1HMF(bp))
8863 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8864
8865 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8866 u8 link_up;
8867
8868 link_up = bp->link_vars.link_up;
8869 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8870 bnx2x_nic_load(bp, LOAD_DIAG);
8871 /* wait until link state is restored */
8872 bnx2x_wait_for_link(bp, link_up);
8873
8874 if (bnx2x_test_registers(bp) != 0) {
8875 buf[0] = 1;
8876 etest->flags |= ETH_TEST_FL_FAILED;
8877 }
8878 if (bnx2x_test_memory(bp) != 0) {
8879 buf[1] = 1;
8880 etest->flags |= ETH_TEST_FL_FAILED;
8881 }
8882 buf[2] = bnx2x_test_loopback(bp, link_up);
8883 if (buf[2] != 0)
8884 etest->flags |= ETH_TEST_FL_FAILED;
8885
8886 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8887 bnx2x_nic_load(bp, LOAD_NORMAL);
8888 /* wait until link state is restored */
8889 bnx2x_wait_for_link(bp, link_up);
8890 }
8891 if (bnx2x_test_nvram(bp) != 0) {
8892 buf[3] = 1;
8893 etest->flags |= ETH_TEST_FL_FAILED;
8894 }
8895 if (bnx2x_test_intr(bp) != 0) {
8896 buf[4] = 1;
8897 etest->flags |= ETH_TEST_FL_FAILED;
8898 }
8899 if (bp->port.pmf)
8900 if (bnx2x_link_test(bp) != 0) {
8901 buf[5] = 1;
8902 etest->flags |= ETH_TEST_FL_FAILED;
8903 }
8904 buf[7] = bnx2x_mc_assert(bp);
8905 if (buf[7] != 0)
8906 etest->flags |= ETH_TEST_FL_FAILED;
8907
8908#ifdef BNX2X_EXTRA_DEBUG
8909 bnx2x_panic_dump(bp);
8910#endif
8911}
8912
8913static const struct {
8914 long offset;
8915 int size;
8916 u32 flags;
8917#define STATS_FLAGS_PORT 1
8918#define STATS_FLAGS_FUNC 2
8919 u8 string[ETH_GSTRING_LEN];
8920} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8921/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8922 8, STATS_FLAGS_FUNC, "rx_bytes" },
8923 { STATS_OFFSET32(error_bytes_received_hi),
8924 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8925 { STATS_OFFSET32(total_bytes_transmitted_hi),
8926 8, STATS_FLAGS_FUNC, "tx_bytes" },
8927 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8928 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8929 { STATS_OFFSET32(total_unicast_packets_received_hi),
8930 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8931 { STATS_OFFSET32(total_multicast_packets_received_hi),
8932 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8933 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8934 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8935 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8936 8, STATS_FLAGS_FUNC, "tx_packets" },
8937 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8938 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8939/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8940 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8941 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8942 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8943 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8944 8, STATS_FLAGS_PORT, "rx_align_errors" },
8945 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8946 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8947 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8948 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8949 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8950 8, STATS_FLAGS_PORT, "tx_deferred" },
8951 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8952 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8953 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8954 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8955 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8956 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8957 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8958 8, STATS_FLAGS_PORT, "rx_fragments" },
8959/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8960 8, STATS_FLAGS_PORT, "rx_jabbers" },
8961 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8962 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8963 { STATS_OFFSET32(jabber_packets_received),
8964 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8965 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8966 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8967 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8968 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8969 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8970 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8971 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8972 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8973 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8974 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8975 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8976 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8977 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8978 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8979/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8980 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8981 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8982 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8983 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8984 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8985 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8986 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8987 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8988 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8989 { STATS_OFFSET32(mac_filter_discard),
8990 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8991 { STATS_OFFSET32(no_buff_discard),
8992 4, STATS_FLAGS_FUNC, "rx_discards" },
8993 { STATS_OFFSET32(xxoverflow_discard),
8994 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8995 { STATS_OFFSET32(brb_drop_hi),
8996 8, STATS_FLAGS_PORT, "brb_discard" },
8997 { STATS_OFFSET32(brb_truncate_hi),
8998 8, STATS_FLAGS_PORT, "brb_truncate" },
8999/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9000 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9001 { STATS_OFFSET32(rx_skb_alloc_failed),
9002 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9003/* 42 */{ STATS_OFFSET32(hw_csum_err),
9004 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9005};
9006
9007#define IS_NOT_E1HMF_STAT(bp, i) \
9008 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9009
9010static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9011{
9012 struct bnx2x *bp = netdev_priv(dev);
9013 int i, j;
9014
9015 switch (stringset) {
9016 case ETH_SS_STATS:
9017 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9018 if (IS_NOT_E1HMF_STAT(bp, i))
9019 continue;
9020 strcpy(buf + j*ETH_GSTRING_LEN,
9021 bnx2x_stats_arr[i].string);
9022 j++;
9023 }
9024 break;
9025
9026 case ETH_SS_TEST:
9027 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9028 break;
9029 }
9030}
9031
9032static int bnx2x_get_stats_count(struct net_device *dev)
9033{
9034 struct bnx2x *bp = netdev_priv(dev);
9035 int i, num_stats = 0;
9036
9037 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9038 if (IS_NOT_E1HMF_STAT(bp, i))
9039 continue;
9040 num_stats++;
9041 }
9042 return num_stats;
9043}
9044
9045static void bnx2x_get_ethtool_stats(struct net_device *dev,
9046 struct ethtool_stats *stats, u64 *buf)
9047{
9048 struct bnx2x *bp = netdev_priv(dev);
9049 u32 *hw_stats = (u32 *)&bp->eth_stats;
9050 int i, j;
9051
9052 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9053 if (IS_NOT_E1HMF_STAT(bp, i))
9054 continue;
9055
9056 if (bnx2x_stats_arr[i].size == 0) {
9057 /* skip this counter */
9058 buf[j] = 0;
9059 j++;
9060 continue;
9061 }
9062 if (bnx2x_stats_arr[i].size == 4) {
9063 /* 4-byte counter */
9064 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9065 j++;
9066 continue;
9067 }
9068 /* 8-byte counter */
9069 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9070 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9071 j++;
9072 }
9073}
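
/*
 * Illustrative sketch, not part of the driver: 8-byte counters sit in the
 * stats block as two 32-bit words, high word first, and HILO_U64 above
 * glues them back together.  Equivalent arithmetic (hypothetical helper):
 */
static inline u64 example_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}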
9074
9075static int bnx2x_phys_id(struct net_device *dev, u32 data)
9076{
9077 struct bnx2x *bp = netdev_priv(dev);
9078 int port = BP_PORT(bp);
9079 int i;
9080
9081 if (!netif_running(dev))
9082 return 0;
9083
9084 if (!bp->port.pmf)
9085 return 0;
9086
9087 if (data == 0)
9088 data = 2;
9089
9090 for (i = 0; i < (data * 2); i++) {
9091 if ((i % 2) == 0)
9092 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9093 bp->link_params.hw_led_mode,
9094 bp->link_params.chip_id);
9095 else
9096 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9097 bp->link_params.hw_led_mode,
9098 bp->link_params.chip_id);
9099
9100 msleep_interruptible(500);
9101 if (signal_pending(current))
9102 break;
9103 }
9104
9105 if (bp->link_vars.link_up)
9106 bnx2x_set_led(bp, port, LED_MODE_OPER,
9107 bp->link_vars.line_speed,
9108 bp->link_params.hw_led_mode,
9109 bp->link_params.chip_id);
9110
9111 return 0;
9112}
9113
9114static struct ethtool_ops bnx2x_ethtool_ops = {
9115 .get_settings = bnx2x_get_settings,
9116 .set_settings = bnx2x_set_settings,
9117 .get_drvinfo = bnx2x_get_drvinfo,
9118 .get_wol = bnx2x_get_wol,
9119 .set_wol = bnx2x_set_wol,
9120 .get_msglevel = bnx2x_get_msglevel,
9121 .set_msglevel = bnx2x_set_msglevel,
9122 .nway_reset = bnx2x_nway_reset,
9123 .get_link = ethtool_op_get_link,
9124 .get_eeprom_len = bnx2x_get_eeprom_len,
9125 .get_eeprom = bnx2x_get_eeprom,
9126 .set_eeprom = bnx2x_set_eeprom,
9127 .get_coalesce = bnx2x_get_coalesce,
9128 .set_coalesce = bnx2x_set_coalesce,
9129 .get_ringparam = bnx2x_get_ringparam,
9130 .set_ringparam = bnx2x_set_ringparam,
9131 .get_pauseparam = bnx2x_get_pauseparam,
9132 .set_pauseparam = bnx2x_set_pauseparam,
9133 .get_rx_csum = bnx2x_get_rx_csum,
9134 .set_rx_csum = bnx2x_set_rx_csum,
9135 .get_tx_csum = ethtool_op_get_tx_csum,
9136 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9137 .set_flags = bnx2x_set_flags,
9138 .get_flags = ethtool_op_get_flags,
9139 .get_sg = ethtool_op_get_sg,
9140 .set_sg = ethtool_op_set_sg,
9141 .get_tso = ethtool_op_get_tso,
9142 .set_tso = bnx2x_set_tso,
9143 .self_test_count = bnx2x_self_test_count,
9144 .self_test = bnx2x_self_test,
9145 .get_strings = bnx2x_get_strings,
9146 .phys_id = bnx2x_phys_id,
9147 .get_stats_count = bnx2x_get_stats_count,
9148 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9149};
9150
9151/* end of ethtool_ops */
9152
9153/****************************************************************************
9154* General service functions
9155****************************************************************************/
9156
9157static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9158{
9159 u16 pmcsr;
9160
9161 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9162
9163 switch (state) {
9164 case PCI_D0:
9165 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9166 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9167 PCI_PM_CTRL_PME_STATUS));
9168
9169 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9170 /* delay required during transition out of D3hot */
9171 msleep(20);
9172 break;
9173
9174 case PCI_D3hot:
9175 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9176 pmcsr |= 3;
9177
9178 if (bp->wol)
9179 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9180
9181 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9182 pmcsr);
9183
9184 /* No more memory access after this point until
9185 * device is brought back to D0.
9186 */
9187 break;
9188
9189 default:
9190 return -EINVAL;
9191 }
9192 return 0;
9193}
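
/*
 * Illustrative sketch, not part of the driver: entering D3hot above is just
 * rewriting the two power-state bits of PMCSR and, when WoL is armed,
 * setting PME_ENABLE.  The same bit surgery as a pure function
 * (hypothetical helper):
 */
static inline u16 example_pmcsr_d3hot(u16 pmcsr, int wol)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* clear the D-state field */
	pmcsr |= 3;				/* 3 == D3hot */
	if (wol)
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
	return pmcsr;
}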
9194
9195/*
9196 * net_device service functions
9197 */
9198
9199static int bnx2x_poll(struct napi_struct *napi, int budget)
9200{
9201 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9202 napi);
9203 struct bnx2x *bp = fp->bp;
9204 int work_done = 0;
9205
9206#ifdef BNX2X_STOP_ON_ERROR
9207 if (unlikely(bp->panic))
9208 goto poll_panic;
9209#endif
9210
9211 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9212 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9213 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9214
9215 bnx2x_update_fpsb_idx(fp);
9216
9217 if (BNX2X_HAS_TX_WORK(fp))
9218 bnx2x_tx_int(fp, budget);
9219
9220 if (BNX2X_HAS_RX_WORK(fp))
9221 work_done = bnx2x_rx_int(fp, budget);
9222
9223 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9224
9225 /* must not complete if we consumed full budget */
9226 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9227
9228#ifdef BNX2X_STOP_ON_ERROR
9229poll_panic:
9230#endif
9231 netif_rx_complete(bp->dev, napi);
9232
9233 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9234 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9235 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9236 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9237 }
9238 return work_done;
9239}
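
/*
 * Illustrative sketch, not part of the driver: the completion rule the
 * poller above implements - NAPI may only be completed (and the IGU
 * interrupt re-enabled) when less than the full budget was consumed AND no
 * work is pending after the rmb().  Hypothetical helper:
 */
static inline int example_napi_may_complete(int work_done, int budget,
					    int has_work)
{
	return (work_done < budget) && !has_work;
}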
9240
9241
9242/* we split the first BD into headers and data BDs
9243 * to ease the pain of our fellow microcode engineers
9244 * we use one mapping for both BDs
9245 * So far this has only been observed to happen
9246 * in Other Operating Systems(TM)
9247 */
9248static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9249 struct bnx2x_fastpath *fp,
9250 struct eth_tx_bd **tx_bd, u16 hlen,
9251 u16 bd_prod, int nbd)
9252{
9253 struct eth_tx_bd *h_tx_bd = *tx_bd;
9254 struct eth_tx_bd *d_tx_bd;
9255 dma_addr_t mapping;
9256 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9257
9258 /* first fix first BD */
9259 h_tx_bd->nbd = cpu_to_le16(nbd);
9260 h_tx_bd->nbytes = cpu_to_le16(hlen);
9261
9262 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9263 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9264 h_tx_bd->addr_lo, h_tx_bd->nbd);
9265
9266 /* now get a new data BD
9267 * (after the pbd) and fill it */
9268 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9269 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9270
9271 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9272 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9273
9274 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9275 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9276 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9277 d_tx_bd->vlan = 0;
9278 /* this marks the BD as one that has no individual mapping
9279 * the FW ignores this flag in a BD not marked start
9280 */
9281 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9282 DP(NETIF_MSG_TX_QUEUED,
9283 "TSO split data size is %d (%x:%x)\n",
9284 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9285
9286 /* update tx_bd for marking the last BD flag */
9287 *tx_bd = d_tx_bd;
9288
9289 return bd_prod;
9290}
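
/*
 * Illustrative sketch, not part of the driver: the split above reuses one
 * DMA mapping for both BDs - the data BD simply starts hlen bytes into the
 * header BD's buffer and carries the remaining length.  The same
 * address/length arithmetic (hypothetical helper):
 */
static inline void example_split_mapping(u64 mapping, u16 total_len, u16 hlen,
					 u64 *data_addr, u16 *data_len)
{
	*data_addr = mapping + hlen;	/* data starts right after headers */
	*data_len = total_len - hlen;	/* header BD keeps the first hlen */
}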
9291
9292static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9293{
9294 if (fix > 0)
9295 csum = (u16) ~csum_fold(csum_sub(csum,
9296 csum_partial(t_header - fix, fix, 0)));
9297
9298 else if (fix < 0)
9299 csum = (u16) ~csum_fold(csum_add(csum,
9300 csum_partial(t_header, -fix, 0)));
9301
9302 return swab16(csum);
9303}
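
/*
 * Illustrative sketch, not part of the driver: the fixup above works in
 * 16-bit one's-complement arithmetic; folding a 32-bit partial sum down to
 * checksum form looks like this (hypothetical helper):
 */
static inline u16 example_csum_fold32(u32 sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* and any carry that made */
	return (u16)~sum;
}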
9304
9305static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9306{
9307 u32 rc;
9308
9309 if (skb->ip_summed != CHECKSUM_PARTIAL)
9310 rc = XMIT_PLAIN;
9311
9312 else {
9313 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9314 rc = XMIT_CSUM_V6;
9315 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9316 rc |= XMIT_CSUM_TCP;
9317
9318 } else {
9319 rc = XMIT_CSUM_V4;
9320 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9321 rc |= XMIT_CSUM_TCP;
9322 }
9323 }
9324
9325 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9326 rc |= XMIT_GSO_V4;
9327
9328 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9329 rc |= XMIT_GSO_V6;
9330
9331 return rc;
9332}
9333
9334/* check if packet requires linearization (packet is too fragmented) */
9335static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9336 u32 xmit_type)
9337{
9338 int to_copy = 0;
9339 int hlen = 0;
9340 int first_bd_sz = 0;
9341
9342 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9343 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9344
9345 if (xmit_type & XMIT_GSO) {
9346 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9347 /* Check if LSO packet needs to be copied:
9348 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9349 int wnd_size = MAX_FETCH_BD - 3;
9350 /* Number of windows to check */
9351 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9352 int wnd_idx = 0;
9353 int frag_idx = 0;
9354 u32 wnd_sum = 0;
9355
9356 /* Headers length */
9357 hlen = (int)(skb_transport_header(skb) - skb->data) +
9358 tcp_hdrlen(skb);
9359
9360 /* Amount of data (w/o headers) on linear part of SKB*/
9361 first_bd_sz = skb_headlen(skb) - hlen;
9362
9363 wnd_sum = first_bd_sz;
9364
9365 /* Calculate the first sum - it's special */
9366 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9367 wnd_sum +=
9368 skb_shinfo(skb)->frags[frag_idx].size;
9369
9370 /* If there was data on linear skb data - check it */
9371 if (first_bd_sz > 0) {
9372 if (unlikely(wnd_sum < lso_mss)) {
9373 to_copy = 1;
9374 goto exit_lbl;
9375 }
9376
9377 wnd_sum -= first_bd_sz;
9378 }
9379
9380 /* Others are easier: run through the frag list and
9381 check all windows */
9382 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9383 wnd_sum +=
9384 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9385
9386 if (unlikely(wnd_sum < lso_mss)) {
9387 to_copy = 1;
9388 break;
9389 }
9390 wnd_sum -=
9391 skb_shinfo(skb)->frags[wnd_idx].size;
9392 }
9393
9394 } else {
9395 /* in non-LSO too fragmented packet should always
9396 be linearized */
9397 to_copy = 1;
9398 }
9399 }
9400
9401exit_lbl:
9402 if (unlikely(to_copy))
9403 DP(NETIF_MSG_TX_QUEUED,
9404 "Linearization IS REQUIRED for %s packet. "
9405 "num_frags %d hlen %d first_bd_sz %d\n",
9406 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9407 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9408
9409 return to_copy;
9410}
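
/*
 * Illustrative sketch, not part of the driver: the check above slides a
 * window of wnd_size fragments across the skb and requires every window to
 * sum to at least one MSS, otherwise the FW could see a BD window smaller
 * than one segment.  Simplified over a plain size array (hypothetical
 * helper, O(n*w) for clarity):
 */
static int example_needs_linearize(const unsigned int *frag_sz, int nfrags,
				   int wnd_size, unsigned int mss)
{
	int i, j;

	if (nfrags < wnd_size)
		return 0;	/* few enough frags: always fetchable */

	for (i = 0; i + wnd_size <= nfrags; i++) {
		unsigned int wnd_sum = 0;

		for (j = i; j < i + wnd_size; j++)
			wnd_sum += frag_sz[j];
		if (wnd_sum < mss)
			return 1;	/* a window under one MSS: copy */
	}
	return 0;
}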
9411
9412/* called with netif_tx_lock
9413 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9414 * netif_wake_queue()
9415 */
9416static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9417{
9418 struct bnx2x *bp = netdev_priv(dev);
9419 struct bnx2x_fastpath *fp;
9420 struct sw_tx_bd *tx_buf;
9421 struct eth_tx_bd *tx_bd;
9422 struct eth_tx_parse_bd *pbd = NULL;
9423 u16 pkt_prod, bd_prod;
9424 int nbd, fp_index;
9425 dma_addr_t mapping;
9426 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9427 int vlan_off = (bp->e1hov ? 4 : 0);
9428 int i;
9429 u8 hlen = 0;
9430
9431#ifdef BNX2X_STOP_ON_ERROR
9432 if (unlikely(bp->panic))
9433 return NETDEV_TX_BUSY;
9434#endif
9435
9436 fp_index = (smp_processor_id() % bp->num_queues);
9437 fp = &bp->fp[fp_index];
9438
9439 if (unlikely(bnx2x_tx_avail(bp->fp) <
9440 (skb_shinfo(skb)->nr_frags + 3))) {
9441 bp->eth_stats.driver_xoff++;
9442 netif_stop_queue(dev);
9443 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9444 return NETDEV_TX_BUSY;
9445 }
9446
9447 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9448 " gso type %x xmit_type %x\n",
9449 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9450 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9451
9452 /* First, check if we need to linearize the skb
9453 (due to FW restrictions) */
9454 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9455 /* Statistics of linearization */
9456 bp->lin_cnt++;
9457 if (skb_linearize(skb) != 0) {
9458 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9459 "silently dropping this SKB\n");
9460 dev_kfree_skb_any(skb);
9461 return NETDEV_TX_OK;
9462 }
9463 }
9464
9465 /*
9466 Please read carefully. First we use one BD which we mark as start,
9467 then for TSO or xsum we have a parsing info BD,
9468 and only then we have the rest of the TSO BDs.
9469 (don't forget to mark the last one as last,
9470 and to unmap only AFTER you write to the BD ...)
9471 And above all, all pdb sizes are in words - NOT DWORDS!
9472 */
9473
9474 pkt_prod = fp->tx_pkt_prod++;
9475 bd_prod = TX_BD(fp->tx_bd_prod);
9476
9477 /* get a tx_buf and first BD */
9478 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9479 tx_bd = &fp->tx_desc_ring[bd_prod];
9480
9481 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9482 tx_bd->general_data = (UNICAST_ADDRESS <<
9483 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9484 tx_bd->general_data |= 1; /* header nbd */
9485
9486 /* remember the first BD of the packet */
9487 tx_buf->first_bd = fp->tx_bd_prod;
9488 tx_buf->skb = skb;
9489
9490 DP(NETIF_MSG_TX_QUEUED,
9491 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9492 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9493
9494 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9495 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9496 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9497 vlan_off += 4;
9498 } else
9499 tx_bd->vlan = cpu_to_le16(pkt_prod);
9500
9501 if (xmit_type) {
9502
9503 /* turn on parsing and get a BD */
9504 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9505 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9506
9507 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9508 }
9509
9510 if (xmit_type & XMIT_CSUM) {
9511 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9512
9513 /* for now NS flag is not used in Linux */
9514 pbd->global_data = (hlen |
9515 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9516 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9517
9518 pbd->ip_hlen = (skb_transport_header(skb) -
9519 skb_network_header(skb)) / 2;
9520
9521 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9522
9523 pbd->total_hlen = cpu_to_le16(hlen);
9524 hlen = hlen*2 - vlan_off;
9525
9526 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9527
9528 if (xmit_type & XMIT_CSUM_V4)
9529 tx_bd->bd_flags.as_bitfield |=
9530 ETH_TX_BD_FLAGS_IP_CSUM;
9531 else
9532 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9533
9534 if (xmit_type & XMIT_CSUM_TCP) {
9535 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9536
9537 } else {
9538 s8 fix = SKB_CS_OFF(skb); /* signed! */
9539
9540 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9541 pbd->cs_offset = fix / 2;
9542
9543 DP(NETIF_MSG_TX_QUEUED,
9544 "hlen %d offset %d fix %d csum before fix %x\n",
9545 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9546 SKB_CS(skb));
9547
9548 /* HW bug: fixup the CSUM */
9549 pbd->tcp_pseudo_csum =
9550 bnx2x_csum_fix(skb_transport_header(skb),
9551 SKB_CS(skb), fix);
9552
9553 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9554 pbd->tcp_pseudo_csum);
9555 }
9556 }
9557
9558 mapping = pci_map_single(bp->pdev, skb->data,
9559 skb_headlen(skb), PCI_DMA_TODEVICE);
9560
9561 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9562 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9563 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9564 tx_bd->nbd = cpu_to_le16(nbd);
9565 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9566
9567 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9568 " nbytes %d flags %x vlan %x\n",
9569 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9570 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9571 le16_to_cpu(tx_bd->vlan));
9572
9573 if (xmit_type & XMIT_GSO) {
9574
9575 DP(NETIF_MSG_TX_QUEUED,
9576 "TSO packet len %d hlen %d total len %d tso size %d\n",
9577 skb->len, hlen, skb_headlen(skb),
9578 skb_shinfo(skb)->gso_size);
9579
9580 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9581
9582 if (unlikely(skb_headlen(skb) > hlen))
9583 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9584 bd_prod, ++nbd);
9585
9586 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9587 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9588 pbd->tcp_flags = pbd_tcp_flags(skb);
9589
9590 if (xmit_type & XMIT_GSO_V4) {
9591 pbd->ip_id = swab16(ip_hdr(skb)->id);
9592 pbd->tcp_pseudo_csum =
9593 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9594 ip_hdr(skb)->daddr,
9595 0, IPPROTO_TCP, 0));
9596
9597 } else
9598 pbd->tcp_pseudo_csum =
9599 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9600 &ipv6_hdr(skb)->daddr,
9601 0, IPPROTO_TCP, 0));
9602
9603 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9604 }
9605
9606 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9607 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9608
9609 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9610 tx_bd = &fp->tx_desc_ring[bd_prod];
9611
9612 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9613 frag->size, PCI_DMA_TODEVICE);
9614
9615 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9616 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9617 tx_bd->nbytes = cpu_to_le16(frag->size);
9618 tx_bd->vlan = cpu_to_le16(pkt_prod);
9619 tx_bd->bd_flags.as_bitfield = 0;
9620
9621 DP(NETIF_MSG_TX_QUEUED,
9622 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9623 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9624 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9625 }
9626
9627 /* now at last mark the BD as the last BD */
9628 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9629
9630 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9631 tx_bd, tx_bd->bd_flags.as_bitfield);
9632
9633 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9634
9635 /* now send a tx doorbell, counting the next BD
9636 * if the packet contains or ends with it
9637 */
9638 if (TX_BD_POFF(bd_prod) < nbd)
9639 nbd++;
9640
9641 if (pbd)
9642 DP(NETIF_MSG_TX_QUEUED,
9643 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9644 " tcp_flags %x xsum %x seq %u hlen %u\n",
9645 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9646 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9647 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9648
9649 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9650
9651 fp->hw_tx_prods->bds_prod =
9652 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9653 mb(); /* FW restriction: must not reorder writing nbd and packets */
9654 fp->hw_tx_prods->packets_prod =
9655 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9656 DOORBELL(bp, FP_IDX(fp), 0);
9657
9658 mmiowb();
9659
9660 fp->tx_bd_prod += nbd;
9661 dev->trans_start = jiffies;
9662
9663 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9664 netif_stop_queue(dev);
9665 bp->eth_stats.driver_xoff++;
9666 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9667 netif_wake_queue(dev);
9668 }
9669 fp->tx_pkt++;
9670
9671 return NETDEV_TX_OK;
9672}
9673
9674/* called with rtnl_lock */
9675static int bnx2x_open(struct net_device *dev)
9676{
9677 struct bnx2x *bp = netdev_priv(dev);
9678
9679 bnx2x_set_power_state(bp, PCI_D0);
9680
9681 return bnx2x_nic_load(bp, LOAD_OPEN);
9682}
9683
9684/* called with rtnl_lock */
9685static int bnx2x_close(struct net_device *dev)
9686{
9687 struct bnx2x *bp = netdev_priv(dev);
9688
9689 /* Unload the driver, release IRQs */
9690 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9691 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9692 if (!CHIP_REV_IS_SLOW(bp))
9693 bnx2x_set_power_state(bp, PCI_D3hot);
9694
9695 return 0;
9696}
9697
9698/* called with netif_tx_lock from set_multicast */
9699static void bnx2x_set_rx_mode(struct net_device *dev)
9700{
9701 struct bnx2x *bp = netdev_priv(dev);
9702 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9703 int port = BP_PORT(bp);
9704
9705 if (bp->state != BNX2X_STATE_OPEN) {
9706 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9707 return;
9708 }
9709
9710 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9711
9712 if (dev->flags & IFF_PROMISC)
9713 rx_mode = BNX2X_RX_MODE_PROMISC;
9714
9715 else if ((dev->flags & IFF_ALLMULTI) ||
9716 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9717 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9718
9719 else { /* some multicasts */
9720 if (CHIP_IS_E1(bp)) {
9721 int i, old, offset;
9722 struct dev_mc_list *mclist;
9723 struct mac_configuration_cmd *config =
9724 bnx2x_sp(bp, mcast_config);
9725
9726 for (i = 0, mclist = dev->mc_list;
9727 mclist && (i < dev->mc_count);
9728 i++, mclist = mclist->next) {
9729
9730 config->config_table[i].
9731 cam_entry.msb_mac_addr =
9732 swab16(*(u16 *)&mclist->dmi_addr[0]);
9733 config->config_table[i].
9734 cam_entry.middle_mac_addr =
9735 swab16(*(u16 *)&mclist->dmi_addr[2]);
9736 config->config_table[i].
9737 cam_entry.lsb_mac_addr =
9738 swab16(*(u16 *)&mclist->dmi_addr[4]);
9739 config->config_table[i].cam_entry.flags =
9740 cpu_to_le16(port);
9741 config->config_table[i].
9742 target_table_entry.flags = 0;
9743 config->config_table[i].
9744 target_table_entry.client_id = 0;
9745 config->config_table[i].
9746 target_table_entry.vlan_id = 0;
9747
9748 DP(NETIF_MSG_IFUP,
9749 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9750 config->config_table[i].
9751 cam_entry.msb_mac_addr,
9752 config->config_table[i].
9753 cam_entry.middle_mac_addr,
9754 config->config_table[i].
9755 cam_entry.lsb_mac_addr);
9756 }
9757 old = config->hdr.length_6b;
9758 if (old > i) {
9759 for (; i < old; i++) {
9760 if (CAM_IS_INVALID(config->
9761 config_table[i])) {
9762 i--; /* already invalidated */
9763 break;
9764 }
9765 /* invalidate */
9766 CAM_INVALIDATE(config->
9767 config_table[i]);
9768 }
9769 }
9770
9771 if (CHIP_REV_IS_SLOW(bp))
9772 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9773 else
9774 offset = BNX2X_MAX_MULTICAST*(1 + port);
9775
9776 config->hdr.length_6b = i;
9777 config->hdr.offset = offset;
9778 config->hdr.client_id = BP_CL_ID(bp);
9779 config->hdr.reserved1 = 0;
9780
9781 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9782 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9783 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9784 0);
9785 } else { /* E1H */
9786 /* Accept one or more multicasts */
9787 struct dev_mc_list *mclist;
9788 u32 mc_filter[MC_HASH_SIZE];
9789 u32 crc, bit, regidx;
9790 int i;
9791
9792 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9793
9794 for (i = 0, mclist = dev->mc_list;
9795 mclist && (i < dev->mc_count);
9796 i++, mclist = mclist->next) {
9797
9798 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9799 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9800 mclist->dmi_addr[0], mclist->dmi_addr[1],
9801 mclist->dmi_addr[2], mclist->dmi_addr[3],
9802 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9803
9804 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9805 bit = (crc >> 24) & 0xff;
9806 regidx = bit >> 5;
9807 bit &= 0x1f;
9808 mc_filter[regidx] |= (1 << bit);
9809 }
9810
9811 for (i = 0; i < MC_HASH_SIZE; i++)
9812 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9813 mc_filter[i]);
9814 }
9815 }
9816
9817 bp->rx_mode = rx_mode;
9818 bnx2x_set_storm_rx_mode(bp);
9819}
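
/*
 * Illustrative sketch, not part of the driver: on E1H the multicast filter
 * is a 256-bit hash; the top byte of the CRC32c of the MAC picks the bit,
 * and the register index / bit position fall out of it as below (same
 * arithmetic as the loop above, hypothetical helper):
 */
static inline void example_mc_hash_pos(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = (crc >> 24) & 0xff;	/* 0..255 */

	*regidx = bit >> 5;		/* which of the 32-bit registers */
	*bitpos = bit & 0x1f;		/* which bit inside that register */
}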
9820
9821/* called with rtnl_lock */
9822static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9823{
9824 struct sockaddr *addr = p;
9825 struct bnx2x *bp = netdev_priv(dev);
9826
9827 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9828 return -EINVAL;
9829
9830 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9831 if (netif_running(dev)) {
9832 if (CHIP_IS_E1(bp))
9833 bnx2x_set_mac_addr_e1(bp);
9834 else
9835 bnx2x_set_mac_addr_e1h(bp);
9836 }
9837
9838 return 0;
9839}
9840
9841/* called with rtnl_lock */
9842static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9843{
9844 struct mii_ioctl_data *data = if_mii(ifr);
9845 struct bnx2x *bp = netdev_priv(dev);
9846 int err;
9847
9848 switch (cmd) {
9849 case SIOCGMIIPHY:
9850 data->phy_id = bp->port.phy_addr;
9851
9852 /* fallthrough */
9853
9854 case SIOCGMIIREG: {
9855 u16 mii_regval;
9856
9857 if (!netif_running(dev))
9858 return -EAGAIN;
9859
9860 mutex_lock(&bp->port.phy_mutex);
9861 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9862 DEFAULT_PHY_DEV_ADDR,
9863 (data->reg_num & 0x1f), &mii_regval);
9864 data->val_out = mii_regval;
9865 mutex_unlock(&bp->port.phy_mutex);
9866 return err;
9867 }
9868
9869 case SIOCSMIIREG:
9870 if (!capable(CAP_NET_ADMIN))
9871 return -EPERM;
9872
9873 if (!netif_running(dev))
9874 return -EAGAIN;
9875
9876 mutex_lock(&bp->port.phy_mutex);
9877 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9878 DEFAULT_PHY_DEV_ADDR,
9879 (data->reg_num & 0x1f), data->val_in);
9880 mutex_unlock(&bp->port.phy_mutex);
9881 return err;
9882
9883 default:
9884 /* do nothing */
9885 break;
9886 }
9887
9888 return -EOPNOTSUPP;
9889}
9890
9891/* called with rtnl_lock */
9892static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9893{
9894 struct bnx2x *bp = netdev_priv(dev);
9895 int rc = 0;
9896
9897 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9898 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9899 return -EINVAL;
9900
9901 /* This does not race with packet allocation
9902 * because the actual alloc size is
9903 * only updated as part of load
9904 */
9905 dev->mtu = new_mtu;
9906
9907 if (netif_running(dev)) {
9908 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9909 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9910 }
9911
9912 return rc;
a2fbb9ea
ET
9913}
9914
9915static void bnx2x_tx_timeout(struct net_device *dev)
9916{
9917 struct bnx2x *bp = netdev_priv(dev);
9918
9919#ifdef BNX2X_STOP_ON_ERROR
9920 if (!bp->panic)
9921 bnx2x_panic();
9922#endif
9923 /* This allows the netif to be shutdown gracefully before resetting */
9924 schedule_work(&bp->reset_task);
9925}
9926
9927#ifdef BCM_VLAN
9928/* called with rtnl_lock */
9929static void bnx2x_vlan_rx_register(struct net_device *dev,
9930 struct vlan_group *vlgrp)
9931{
9932 struct bnx2x *bp = netdev_priv(dev);
9933
9934 bp->vlgrp = vlgrp;
9935 if (netif_running(dev))
9936 bnx2x_set_client_config(bp);
a2fbb9ea 9937}
9938
9939#endif
9940
9941#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9942static void poll_bnx2x(struct net_device *dev)
9943{
9944 struct bnx2x *bp = netdev_priv(dev);
9945
9946 disable_irq(bp->pdev->irq);
9947 bnx2x_interrupt(bp->pdev->irq, dev);
9948 enable_irq(bp->pdev->irq);
9949}
9950#endif
9951
9952static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9953 struct net_device *dev)
9954{
9955 struct bnx2x *bp;
9956 int rc;
9957
9958 SET_NETDEV_DEV(dev, &pdev->dev);
9959 bp = netdev_priv(dev);
9960
9961 bp->dev = dev;
9962 bp->pdev = pdev;
9963 bp->flags = 0;
9964 bp->func = PCI_FUNC(pdev->devfn);
9965
9966 rc = pci_enable_device(pdev);
9967 if (rc) {
9968 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9969 goto err_out;
9970 }
9971
9972 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9973 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9974 " aborting\n");
9975 rc = -ENODEV;
9976 goto err_out_disable;
9977 }
9978
9979 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9980 printk(KERN_ERR PFX "Cannot find second PCI device"
9981 " base address, aborting\n");
9982 rc = -ENODEV;
9983 goto err_out_disable;
9984 }
9985
9986 if (atomic_read(&pdev->enable_cnt) == 1) {
9987 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9988 if (rc) {
9989 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9990 " aborting\n");
9991 goto err_out_disable;
9992 }
9993
9994 pci_set_master(pdev);
9995 pci_save_state(pdev);
9996 }
9997
9998 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9999 if (bp->pm_cap == 0) {
10000 printk(KERN_ERR PFX "Cannot find power management"
10001 " capability, aborting\n");
10002 rc = -EIO;
10003 goto err_out_release;
10004 }
10005
10006 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10007 if (bp->pcie_cap == 0) {
10008 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10009 " aborting\n");
10010 rc = -EIO;
10011 goto err_out_release;
10012 }
10013
10014 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10015 bp->flags |= USING_DAC_FLAG;
10016 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10017 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10018 " failed, aborting\n");
10019 rc = -EIO;
10020 goto err_out_release;
10021 }
10022
10023 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10024 printk(KERN_ERR PFX "System does not support DMA,"
10025 " aborting\n");
10026 rc = -EIO;
10027 goto err_out_release;
10028 }
10029
10030 dev->mem_start = pci_resource_start(pdev, 0);
10031 dev->base_addr = dev->mem_start;
10032 dev->mem_end = pci_resource_end(pdev, 0);
10033
10034 dev->irq = pdev->irq;
10035
10036 bp->regview = ioremap_nocache(dev->base_addr,
10037 pci_resource_len(pdev, 0));
10038 if (!bp->regview) {
10039 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10040 rc = -ENOMEM;
10041 goto err_out_release;
10042 }
10043
10044 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10045 min_t(u64, BNX2X_DB_SIZE,
10046 pci_resource_len(pdev, 2)));
10047 if (!bp->doorbells) {
10048 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10049 rc = -ENOMEM;
10050 goto err_out_unmap;
10051 }
10052
10053 bnx2x_set_power_state(bp, PCI_D0);
10054
10055 /* clean indirect addresses */
10056 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10057 PCICFG_VENDOR_ID_OFFSET);
10058 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10059 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10060 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10061 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10062
10063 dev->hard_start_xmit = bnx2x_start_xmit;
10064 dev->watchdog_timeo = TX_TIMEOUT;
10065
10066 dev->ethtool_ops = &bnx2x_ethtool_ops;
10067 dev->open = bnx2x_open;
10068 dev->stop = bnx2x_close;
10069 dev->set_multicast_list = bnx2x_set_rx_mode;
10070 dev->set_mac_address = bnx2x_change_mac_addr;
10071 dev->do_ioctl = bnx2x_ioctl;
10072 dev->change_mtu = bnx2x_change_mtu;
10073 dev->tx_timeout = bnx2x_tx_timeout;
10074#ifdef BCM_VLAN
10075 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10076#endif
10077#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10078 dev->poll_controller = poll_bnx2x;
10079#endif
10080 dev->features |= NETIF_F_SG;
10081 dev->features |= NETIF_F_HW_CSUM;
10082 if (bp->flags & USING_DAC_FLAG)
10083 dev->features |= NETIF_F_HIGHDMA;
10084#ifdef BCM_VLAN
10085 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10086#endif
10087 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10088 dev->features |= NETIF_F_TSO6;
10089
10090 return 0;
10091
10092err_out_unmap:
10093 if (bp->regview) {
10094 iounmap(bp->regview);
10095 bp->regview = NULL;
10096 }
10097 if (bp->doorbells) {
10098 iounmap(bp->doorbells);
10099 bp->doorbells = NULL;
10100 }
10101
10102err_out_release:
10103 if (atomic_read(&pdev->enable_cnt) == 1)
10104 pci_release_regions(pdev);
10105
10106err_out_disable:
10107 pci_disable_device(pdev);
10108 pci_set_drvdata(pdev, NULL);
10109
10110err_out:
10111 return rc;
10112}
10113
10114static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10115{
10116 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10117
10118 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10119 return val;
10120}
10121
10122/* return value of 1=2.5GHz 2=5GHz */
10123static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10124{
10125 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10126
10127 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10128 return val;
10129}
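
/*
 * Illustrative sketch, not part of the driver: both helpers above are the
 * same mask-and-shift field extraction from the link control register
 * (hypothetical generic form):
 */
static inline u32 example_get_field(u32 reg, u32 mask, int shift)
{
	return (reg & mask) >> shift;
}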
10130
10131static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10132 const struct pci_device_id *ent)
10133{
10134 static int version_printed;
10135 struct net_device *dev = NULL;
10136 struct bnx2x *bp;
10137 int rc;
10138 DECLARE_MAC_BUF(mac);
10139
10140 if (version_printed++ == 0)
10141 printk(KERN_INFO "%s", version);
10142
10143 /* dev zeroed in init_etherdev */
10144 dev = alloc_etherdev(sizeof(*bp));
10145 if (!dev) {
10146 printk(KERN_ERR PFX "Cannot allocate net device\n");
10147 return -ENOMEM;
10148 }
10149
10150 netif_carrier_off(dev);
10151
10152 bp = netdev_priv(dev);
10153 bp->msglevel = debug;
10154
10155 rc = bnx2x_init_dev(pdev, dev);
10156 if (rc < 0) {
10157 free_netdev(dev);
10158 return rc;
10159 }
10160
10161 rc = register_netdev(dev);
10162 if (rc) {
10163 dev_err(&pdev->dev, "Cannot register net device\n");
10164 goto init_one_exit;
10165 }
10166
10167 pci_set_drvdata(pdev, dev);
10168
10169 rc = bnx2x_init_bp(bp);
10170 if (rc) {
10171 unregister_netdev(dev);
10172 goto init_one_exit;
10173 }
10174
10175 bp->common.name = board_info[ent->driver_data].name;
10176 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10177 " IRQ %d, ", dev->name, bp->common.name,
10178 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10179 bnx2x_get_pcie_width(bp),
10180 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10181 dev->base_addr, bp->pdev->irq);
10182 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10183 return 0;
10184
10185init_one_exit:
10186 if (bp->regview)
10187 iounmap(bp->regview);
10188
10189 if (bp->doorbells)
10190 iounmap(bp->doorbells);
10191
10192 free_netdev(dev);
10193
10194 if (atomic_read(&pdev->enable_cnt) == 1)
10195 pci_release_regions(pdev);
10196
10197 pci_disable_device(pdev);
10198 pci_set_drvdata(pdev, NULL);
10199
10200 return rc;
10201}
10202
10203static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10204{
10205 struct net_device *dev = pci_get_drvdata(pdev);
10206 struct bnx2x *bp;
10207
10208 if (!dev) {
10209 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10210 return;
10211 }
10212 bp = netdev_priv(dev);
10213
10214 unregister_netdev(dev);
10215
10216 if (bp->regview)
10217 iounmap(bp->regview);
10218
10219 if (bp->doorbells)
10220 iounmap(bp->doorbells);
10221
10222 free_netdev(dev);
10223
10224 if (atomic_read(&pdev->enable_cnt) == 1)
10225 pci_release_regions(pdev);
10226
10227 pci_disable_device(pdev);
10228 pci_set_drvdata(pdev, NULL);
10229}
10230
10231static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10232{
10233 struct net_device *dev = pci_get_drvdata(pdev);
10234 struct bnx2x *bp;
10235
10236 if (!dev) {
10237 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10238 return -ENODEV;
10239 }
10240 bp = netdev_priv(dev);
10241
10242 rtnl_lock();
10243
10244 pci_save_state(pdev);
10245
10246 if (!netif_running(dev)) {
10247 rtnl_unlock();
10248 return 0;
10249 }
10250
10251 netif_device_detach(dev);
10252
10253 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10254
10255 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10256
10257 rtnl_unlock();
10258
10259 return 0;
10260}
10261
10262static int bnx2x_resume(struct pci_dev *pdev)
10263{
10264 struct net_device *dev = pci_get_drvdata(pdev);
10265 struct bnx2x *bp;
10266 int rc;
10267
10268 if (!dev) {
10269 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10270 return -ENODEV;
10271 }
10272 bp = netdev_priv(dev);
10273
10274 rtnl_lock();
10275
10276 pci_restore_state(pdev);
10277
10278 if (!netif_running(dev)) {
10279 rtnl_unlock();
10280 return 0;
10281 }
10282
10283 bnx2x_set_power_state(bp, PCI_D0);
10284 netif_device_attach(dev);
10285
10286 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10287
10288 rtnl_unlock();
10289
10290 return rc;
10291}
10292
10293/**
10294 * bnx2x_io_error_detected - called when PCI error is detected
10295 * @pdev: Pointer to PCI device
10296 * @state: The current pci connection state
10297 *
10298 * This function is called after a PCI bus error affecting
10299 * this device has been detected.
10300 */
10301static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10302 pci_channel_state_t state)
10303{
10304 struct net_device *dev = pci_get_drvdata(pdev);
10305 struct bnx2x *bp = netdev_priv(dev);
10306
10307 rtnl_lock();
10308
10309 netif_device_detach(dev);
10310
10311 if (netif_running(dev))
10312 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10313
10314 pci_disable_device(pdev);
10315
10316 rtnl_unlock();
10317
10318 /* Request a slot reset */
10319 return PCI_ERS_RESULT_NEED_RESET;
10320}
10321
10322/**
10323 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10324 * @pdev: Pointer to PCI device
10325 *
10326 * Restart the card from scratch, as if from a cold-boot.
10327 */
10328static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10329{
10330 struct net_device *dev = pci_get_drvdata(pdev);
10331 struct bnx2x *bp = netdev_priv(dev);
10332
10333 rtnl_lock();
10334
10335 if (pci_enable_device(pdev)) {
10336 dev_err(&pdev->dev,
10337 "Cannot re-enable PCI device after reset\n");
10338 rtnl_unlock();
10339 return PCI_ERS_RESULT_DISCONNECT;
10340 }
10341
10342 pci_set_master(pdev);
10343 pci_restore_state(pdev);
10344
10345 if (netif_running(dev))
10346 bnx2x_set_power_state(bp, PCI_D0);
10347
10348 rtnl_unlock();
10349
10350 return PCI_ERS_RESULT_RECOVERED;
10351}
10352
10353/**
10354 * bnx2x_io_resume - called when traffic can start flowing again
10355 * @pdev: Pointer to PCI device
10356 *
10357 * This callback is called when the error recovery driver tells us that
10358 * it's OK to resume normal operation.
10359 */
10360static void bnx2x_io_resume(struct pci_dev *pdev)
10361{
10362 struct net_device *dev = pci_get_drvdata(pdev);
10363 struct bnx2x *bp = netdev_priv(dev);
10364
10365 rtnl_lock();
10366
10367 if (netif_running(dev))
10368 bnx2x_nic_load(bp, LOAD_OPEN);
10369
10370 netif_device_attach(dev);
10371
10372 rtnl_unlock();
10373}
10374
10375static struct pci_error_handlers bnx2x_err_handler = {
10376 .error_detected = bnx2x_io_error_detected,
10377 .slot_reset = bnx2x_io_slot_reset,
10378 .resume = bnx2x_io_resume,
10379};
10380
10381 static struct pci_driver bnx2x_pci_driver = {
10382 .name = DRV_MODULE_NAME,
10383 .id_table = bnx2x_pci_tbl,
10384 .probe = bnx2x_init_one,
10385 .remove = __devexit_p(bnx2x_remove_one),
10386 .suspend = bnx2x_suspend,
10387 .resume = bnx2x_resume,
10388 .err_handler = &bnx2x_err_handler,
10389};
10390
10391static int __init bnx2x_init(void)
10392{
10393 return pci_register_driver(&bnx2x_pci_driver);
10394}
10395
10396static void __exit bnx2x_cleanup(void)
10397{
10398 pci_unregister_driver(&bnx2x_pci_driver);
10399}
10400
10401module_init(bnx2x_init);
10402module_exit(bnx2x_cleanup);
10403