bnx2x: HW lock mechanism
[net-next-2.6.git] / drivers / net / bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
 #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

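/* Indirect GRC access: the target address is latched into the
 * PCICFG_GRC_ADDRESS config-space window, the value moves through
 * PCICFG_GRC_DATA, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET.  bnx2x_reg_rd_ind() below is the read-side
 * twin of bnx2x_reg_wr_ind() above.
 */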
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

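/* The two DMAE helpers below drive the on-chip DMAE copy engine: a
 * dmae_command describing source, destination and length (in dwords) is
 * posted via bnx2x_post_dmae(), and completion is detected by polling the
 * slowpath wb_comp word for DMAE_COMP_VAL (up to ~200 iterations).  While
 * the engine is not yet ready (bp->dmae_ready clear), both fall back to
 * indirect register access.
 */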
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
                   " using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
                   " using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

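/* bnx2x_mc_assert() scans the assert lists of the four storm processors
 * (XSTORM, TSTORM, CSTORM and USTORM).  Each list entry is four dwords;
 * an entry whose row0 equals COMMON_ASM_INVALID_ASSERT_OPCODE terminates
 * the scan.  The return value is the number of asserts found.
 */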
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

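/* bnx2x_panic_dump() disables statistics and then prints, per fastpath
 * queue, the Tx/Rx producer-consumer state followed by raw dumps of the
 * Tx BDs, Rx BDs, SGEs and CQEs around the current consumers, ending with
 * the firmware scratchpad dump and the storm assert lists.
 */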
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
                          " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
                          " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
                          " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
                          " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
                          " *sb_u_idx(%x) bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = 0;
                end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
                  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
                  " spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

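/* bnx2x_int_enable()/bnx2x_int_disable() program the port's HC_REG_CONFIG
 * register in the host coalescing block.  In MSI-X mode the single-ISR bit
 * is cleared; in INT#A mode the config is written twice - first with the
 * MSI/MSI-X enable bit set and then with it cleared - as the two REG_WR
 * steps below show.
 */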
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

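/* bnx2x_ack_sb() acknowledges a status block to the IGU: the status block
 * id, storm id, new index value, interrupt mode and update flag are packed
 * into a single igu_ack_register word and written to the function's
 * IGU_ADDR_INT_ACK mailbox.
 */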
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

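/* bnx2x_tx_avail() computes the number of usable Tx BDs.  Illustrative
 * arithmetic: with prod == cons (an empty ring), "used" is just the
 * NUM_TX_RINGS next-page entries, so the result is tx_ring_size minus
 * that reserve rather than the full ring size.
 */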
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d cid %d got ramrod #%d state is %x type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d) "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

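/* The sge_mask tracks SGE ring entries still owned by the hardware: a bit
 * is cleared when the FW reports the page consumed.  bnx2x_update_sge_prod()
 * below re-arms whole 64-bit mask elements once all their bits are clear,
 * advancing rx_sge_prod by RX_SGE_MASK_ELEM_SZ per element handed back.
 */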
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

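/* TPA (LRO) flow: bnx2x_tpa_start() parks the partially-aggregated skb in
 * the per-queue tpa_pool and maps a fresh skb into its ring slot;
 * bnx2x_tpa_stop() later completes the aggregation - fixing the IP
 * checksum, attaching the SGE pages via bnx2x_fill_frag_skb() and handing
 * the skb to the stack.
 */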
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that the "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                               BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail... "
                                  "pad %d len %d rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

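/* The three Rx producers (BD, CQE and SGE) are published to the chip as a
 * single tstorm_eth_rx_producers structure, written dword by dword into
 * TSTORM internal memory at the client's TSTORM_RX_PRODS_OFFSET.
 */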
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;
        u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
                   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
                                                BNX2X_ERR("STOP on non-TCP "
                                                          "data\n");

                                        /* This is a size of the linear data
                                           on this skb */
                                        len = le16_to_cpu(cqe->fast_path_cqe.
                                                                len_on_bd);
                                        bnx2x_tpa_stop(bp, fp, queue, pad,
                                                       len, cqe,
                                                       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                                        if (bp->panic)
                                                return -EINVAL;
#endif

                                        bnx2x_update_sge_prod(fp,
                                                        &cqe->fast_path_cqe);
                                        goto next_cqe;
                                }
                        }

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                        pad + RX_COPY_THRESH,
                                        PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags %x rx packet %u\n",
                                   cqe_fp_flags, sw_comp_cons);
                                bp->eth_stats.rx_err_discard_pkt++;
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        bp->eth_stats.rx_skb_alloc_failed++;
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                    new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_use_size,
                                                 PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
                                bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum) {
                                if (likely(BNX2X_RX_CSUM_OK(cqe)))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        bp->eth_stats.hw_csum_err++;
                        }
                }

#ifdef BCM_VLAN
                if ((bp->vlgrp != NULL) &&
                    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                     PARSING_FLAGS_VLAN))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
                rx_pkt++;
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod_fw;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                             fp->rx_sge_prod);
        mmiowb(); /* keep prod updates ordered */

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = FP_IDX(fp);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
           index, FP_SB_ID(fp));
        bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

        return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        mask = 0x2 << bp->fp[0].sb_id;
        if (status & mask) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~mask;
        }

        if (unlikely(status & 0x1)) {
                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (status)
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
                   status);

        return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

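/* HW resource locks: each lockable resource is one bit in a per-function
 * MISC_REG_DRIVER_CONTROL register pair.  Writing the resource bit to the
 * set-register (base + 4) attempts the grab, reading the bit back confirms
 * ownership, and writing the bit to the base register releases it.
 * Acquire polls for up to 1 second in 5ms steps.  Illustrative usage,
 * mirroring the GPIO accessor further below:
 *
 *      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      ... touch the shared MISC_REG_GPIO register ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */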
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 1 second every 5ms */
        for (cnt = 0; cnt < 200; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
        u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        mutex_lock(&bp->port.phy_mutex);

        if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
            (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
        u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
            (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

        mutex_unlock(&bp->port.phy_mutex);
}

c18487ee
YR
1798int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1799{
1800 /* The GPIO should be swapped if swap register is set and active */
1801 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1802 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1803 int gpio_shift = gpio_num +
1804 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1805 u32 gpio_mask = (1 << gpio_shift);
1806 u32 gpio_reg;
a2fbb9ea 1807
c18487ee
YR
1808 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1809 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1810 return -EINVAL;
1811 }
a2fbb9ea 1812
4a37fb66 1813 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1814 /* read GPIO and mask except the float bits */
1815 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1816
c18487ee
YR
1817 switch (mode) {
1818 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1819 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1820 gpio_num, gpio_shift);
1821 /* clear FLOAT and set CLR */
1822 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1823 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1824 break;
a2fbb9ea 1825
c18487ee
YR
1826 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1827 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1828 gpio_num, gpio_shift);
1829 /* clear FLOAT and set SET */
1830 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1832 break;
a2fbb9ea 1833
c18487ee
YR
 1834 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1835 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1836 gpio_num, gpio_shift);
1837 /* set FLOAT */
1838 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1839 break;
a2fbb9ea 1840
c18487ee
YR
1841 default:
1842 break;
a2fbb9ea
ET
1843 }
1844
c18487ee 1845 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1847
c18487ee 1848 return 0;
a2fbb9ea
ET
1849}
1850
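/*
 * Editor's sketch (illustration only): a call like the one the fan
 * failure handler makes later in this file, driving the PHY reset
 * line low.  On a swapped port the helper shifts the GPIO mask by
 * MISC_REGISTERS_GPIO_PORT_SHIFT so the same GPIO number addresses
 * the other port's pin:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW);
 */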
c18487ee 1851static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1852{
c18487ee
YR
1853 u32 spio_mask = (1 << spio_num);
1854 u32 spio_reg;
a2fbb9ea 1855
c18487ee
YR
1856 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1857 (spio_num > MISC_REGISTERS_SPIO_7)) {
1858 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1859 return -EINVAL;
a2fbb9ea
ET
1860 }
1861
4a37fb66 1862 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1863 /* read SPIO and mask except the float bits */
1864 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1865
c18487ee
YR
1866 switch (mode) {
 1867 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1869 /* clear FLOAT and set CLR */
1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1872 break;
a2fbb9ea 1873
c18487ee
YR
 1874 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1875 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1876 /* clear FLOAT and set SET */
1877 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1879 break;
a2fbb9ea 1880
c18487ee
YR
1881 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1882 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1883 /* set FLOAT */
1884 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1885 break;
a2fbb9ea 1886
c18487ee
YR
1887 default:
1888 break;
a2fbb9ea
ET
1889 }
1890
c18487ee 1891 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1892 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1893
a2fbb9ea
ET
1894 return 0;
1895}
1896
c18487ee 1897static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1898{
c18487ee
YR
1899 switch (bp->link_vars.ieee_fc) {
1900 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1901 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1902 ADVERTISED_Pause);
1903 break;
1904 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1905 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1906 ADVERTISED_Pause);
1907 break;
1908 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1909 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1910 break;
1911 default:
34f80b04 1912 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1913 ADVERTISED_Pause);
1914 break;
1915 }
1916}
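/*
 * Editor's note: the switch above maps the negotiated IEEE pause
 * encoding onto the ethtool advertising bits, roughly:
 *
 *	ieee_fc			ADVERTISED_Asym_Pause	ADVERTISED_Pause
 *	..._PAUSE_NONE			0			0
 *	..._PAUSE_BOTH			1			1
 *	..._PAUSE_ASYMMETRIC		1			0
 */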
f1410647 1917
c18487ee
YR
1918static void bnx2x_link_report(struct bnx2x *bp)
1919{
1920 if (bp->link_vars.link_up) {
1921 if (bp->state == BNX2X_STATE_OPEN)
1922 netif_carrier_on(bp->dev);
1923 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1924
c18487ee 1925 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1926
c18487ee
YR
1927 if (bp->link_vars.duplex == DUPLEX_FULL)
1928 printk("full duplex");
1929 else
1930 printk("half duplex");
f1410647 1931
c18487ee
YR
1932 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1933 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1934 printk(", receive ");
1935 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1936 printk("& transmit ");
1937 } else {
1938 printk(", transmit ");
1939 }
1940 printk("flow control ON");
1941 }
1942 printk("\n");
f1410647 1943
c18487ee
YR
1944 } else { /* link_down */
1945 netif_carrier_off(bp->dev);
1946 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1947 }
c18487ee
YR
1948}
1949
1950static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1951{
19680c48
EG
1952 if (!BP_NOMCP(bp)) {
1953 u8 rc;
a2fbb9ea 1954
19680c48
EG
1955 /* Initialize link parameters structure variables */
1956 bp->link_params.mtu = bp->dev->mtu;
a2fbb9ea 1957
4a37fb66 1958 bnx2x_acquire_phy_lock(bp);
19680c48 1959 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1960 bnx2x_release_phy_lock(bp);
a2fbb9ea 1961
19680c48
EG
1962 if (bp->link_vars.link_up)
1963 bnx2x_link_report(bp);
a2fbb9ea 1964
19680c48 1965 bnx2x_calc_fc_adv(bp);
34f80b04 1966
19680c48
EG
1967 return rc;
1968 }
 1969 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1970 return -EINVAL;
a2fbb9ea
ET
1971}
1972
c18487ee 1973static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1974{
19680c48 1975 if (!BP_NOMCP(bp)) {
4a37fb66 1976 bnx2x_acquire_phy_lock(bp);
19680c48 1977 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1978 bnx2x_release_phy_lock(bp);
a2fbb9ea 1979
19680c48
EG
1980 bnx2x_calc_fc_adv(bp);
1981 } else
 1982 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 1983}
a2fbb9ea 1984
c18487ee
YR
1985static void bnx2x__link_reset(struct bnx2x *bp)
1986{
19680c48 1987 if (!BP_NOMCP(bp)) {
4a37fb66 1988 bnx2x_acquire_phy_lock(bp);
19680c48 1989 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 1990 bnx2x_release_phy_lock(bp);
19680c48
EG
1991 } else
 1992 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 1993}
a2fbb9ea 1994
c18487ee
YR
1995static u8 bnx2x_link_test(struct bnx2x *bp)
1996{
1997 u8 rc;
a2fbb9ea 1998
4a37fb66 1999 bnx2x_acquire_phy_lock(bp);
c18487ee 2000 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2001 bnx2x_release_phy_lock(bp);
a2fbb9ea 2002
c18487ee
YR
2003 return rc;
2004}
a2fbb9ea 2005
34f80b04
EG
2006/* Calculates the sum of vn_min_rates.
2007 It's needed for further normalizing of the min_rates.
2008
2009 Returns:
2010 sum of vn_min_rates
2011 or
2012 0 - if all the min_rates are 0.
 2013 In the latter case the fairness algorithm should be deactivated.
 2014 If not all min_rates are zero then those that are zero will
 2015 be set to 1.
2016 */
2017static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2018{
2019 int i, port = BP_PORT(bp);
2020 u32 wsum = 0;
2021 int all_zero = 1;
2022
2023 for (i = 0; i < E1HVN_MAX; i++) {
2024 u32 vn_cfg =
2025 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029 /* If min rate is zero - set it to 1 */
2030 if (!vn_min_rate)
2031 vn_min_rate = DEF_MIN_RATE;
2032 else
2033 all_zero = 0;
2034
2035 wsum += vn_min_rate;
2036 }
2037 }
2038
2039 /* ... only if all min rates are zeros - disable FAIRNESS */
2040 if (all_zero)
2041 return 0;
2042
2043 return wsum;
2044}
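/*
 * Editor's worked example (assumed configuration): with four vnics on
 * the port whose configured min rates are {0, 2500, 0, 5000} Mbps,
 * the two zero entries are counted as DEF_MIN_RATE each, so
 * wsum = 2 * DEF_MIN_RATE + 7500 and fairness stays enabled.  Only
 * when all the min rates are zero does the function return 0 and
 * fairness is disabled in bnx2x_init_port_minmax() below.
 */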
2045
2046static void bnx2x_init_port_minmax(struct bnx2x *bp,
2047 int en_fness,
2048 u16 port_rate,
2049 struct cmng_struct_per_port *m_cmng_port)
2050{
2051 u32 r_param = port_rate / 8;
2052 int port = BP_PORT(bp);
2053 int i;
2054
2055 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2056
2057 /* Enable minmax only if we are in e1hmf mode */
2058 if (IS_E1HMF(bp)) {
2059 u32 fair_periodic_timeout_usec;
2060 u32 t_fair;
2061
2062 /* Enable rate shaping and fairness */
2063 m_cmng_port->flags.cmng_vn_enable = 1;
2064 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065 m_cmng_port->flags.rate_shaping_enable = 1;
2066
2067 if (!en_fness)
 2068 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2069 " fairness will be disabled\n");
2070
2071 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072 m_cmng_port->rs_vars.rs_periodic_timeout =
2073 RS_PERIODIC_TIMEOUT_USEC / 4;
2074
 2075 /* this is the threshold below which no timer arming will occur.
 2076 The 1.25 coefficient makes the threshold a little bigger
 2077 than the real time, to compensate for timer inaccuracy */
2078 m_cmng_port->rs_vars.rs_threshold =
2079 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2080
2081 /* resolution of fairness timer */
2082 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2084 t_fair = T_FAIR_COEF / port_rate;
2085
2086 /* this is the threshold below which we won't arm
2087 the timer anymore */
2088 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2089
2090 /* we multiply by 1e3/8 to get bytes/msec.
 2091 We don't want the credits to exceed
 2092 T_FAIR*FAIR_MEM (the algorithm resolution) */
2093 m_cmng_port->fair_vars.upper_bound =
2094 r_param * t_fair * FAIR_MEM;
2095 /* since each tick is 4 usec */
2096 m_cmng_port->fair_vars.fairness_timeout =
2097 fair_periodic_timeout_usec / 4;
2098
2099 } else {
2100 /* Disable rate shaping and fairness */
2101 m_cmng_port->flags.cmng_vn_enable = 0;
2102 m_cmng_port->flags.fairness_enable = 0;
2103 m_cmng_port->flags.rate_shaping_enable = 0;
2104
2105 DP(NETIF_MSG_IFUP,
2106 "Single function mode minmax will be disabled\n");
2107 }
2108
2109 /* Store it to internal memory */
2110 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113 ((u32 *)(m_cmng_port))[i]);
2114}
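/*
 * Editor's worked example (assuming the constants have the values the
 * comments above imply: RS_PERIODIC_TIMEOUT_USEC == 100 and
 * T_FAIR_COEF == 10^7): on a 10G link, port_rate = 10000 Mbps gives
 * r_param = 1250 bytes/usec, rs_periodic_timeout = 100 / 4 = 25 SDM
 * ticks, rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, and
 * t_fair = 10^7 / 10000 = 1000 usec - matching the "for 10G it is
 * 1000usec" note above.
 */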
2115
2116static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117 u32 wsum, u16 port_rate,
2118 struct cmng_struct_per_port *m_cmng_port)
2119{
2120 struct rate_shaping_vars_per_vn m_rs_vn;
2121 struct fairness_vars_per_vn m_fair_vn;
2122 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123 u16 vn_min_rate, vn_max_rate;
2124 int i;
2125
2126 /* If function is hidden - set min and max to zeroes */
2127 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2128 vn_min_rate = 0;
2129 vn_max_rate = 0;
2130
2131 } else {
2132 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2135 if current min rate is zero - set it to 1.
 2136 This is a requirement of the algorithm. */
2137 if ((vn_min_rate == 0) && wsum)
2138 vn_min_rate = DEF_MIN_RATE;
2139 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2141 }
2142
2143 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2144 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2145
2146 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2148
2149 /* global vn counter - maximal Mbps for this vn */
2150 m_rs_vn.vn_counter.rate = vn_max_rate;
2151
2152 /* quota - number of bytes transmitted in this period */
2153 m_rs_vn.vn_counter.quota =
2154 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2155
2156#ifdef BNX2X_PER_PROT_QOS
2157 /* per protocol counter */
2158 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159 /* maximal Mbps for this protocol */
2160 m_rs_vn.protocol_counters[protocol].rate =
2161 protocol_max_rate[protocol];
2162 /* the quota in each timer period -
2163 number of bytes transmitted in this period */
2164 m_rs_vn.protocol_counters[protocol].quota =
2165 (u32)(rs_periodic_timeout_usec *
2166 ((double)m_rs_vn.
2167 protocol_counters[protocol].rate/8));
2168 }
2169#endif
2170
2171 if (wsum) {
2172 /* credit for each period of the fairness algorithm:
 2173 number of bytes in T_FAIR (the vns share the port rate).
 2174 wsum should not be larger than 10000, thus
 2175 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176 m_fair_vn.vn_credit_delta =
2177 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180 m_fair_vn.vn_credit_delta);
2181 }
2182
2183#ifdef BNX2X_PER_PROT_QOS
2184 do {
2185 u32 protocolWeightSum = 0;
2186
2187 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188 protocolWeightSum +=
2189 drvInit.protocol_min_rate[protocol];
2190 /* per protocol counter -
2191 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192 if (protocolWeightSum > 0) {
2193 for (protocol = 0;
2194 protocol < NUM_OF_PROTOCOLS; protocol++)
2195 /* credit for each period of the
2196 fairness algorithm - number of bytes in
2197 T_FAIR (the protocol share the vn rate) */
2198 m_fair_vn.protocol_credit_delta[protocol] =
2199 (u32)((vn_min_rate / 8) * t_fair *
2200 protocol_min_rate / protocolWeightSum);
2201 }
2202 } while (0);
2203#endif
2204
2205 /* Store it to internal memory */
2206 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209 ((u32 *)(&m_rs_vn))[i]);
2210
2211 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214 ((u32 *)(&m_fair_vn))[i]);
2215}
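/*
 * Editor's worked example (assumed numbers): for vn_max_rate ==
 * 2500 Mbps and RS_PERIODIC_TIMEOUT_USEC == 100, the quota is
 * 2500 * 100 / 8 = 31250 bytes per rate-shaping period.  For the
 * fairness credit, with T_FAIR_COEF == 10^7 and the maximal
 * wsum == 10000, T_FAIR_COEF / (8 * wsum) = 125, which is why the
 * comment above can promise the per-period credit never degenerates
 * to zero.
 */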
2216
c18487ee
YR
2217/* This function is called upon link interrupt */
2218static void bnx2x_link_attn(struct bnx2x *bp)
2219{
34f80b04
EG
2220 int vn;
2221
bb2a0f7a
YG
2222 /* Make sure that we are synced with the current statistics */
2223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2224
4a37fb66 2225 bnx2x_acquire_phy_lock(bp);
c18487ee 2226 bnx2x_link_update(&bp->link_params, &bp->link_vars);
4a37fb66 2227 bnx2x_release_phy_lock(bp);
a2fbb9ea 2228
bb2a0f7a
YG
2229 if (bp->link_vars.link_up) {
2230
2231 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232 struct host_port_stats *pstats;
2233
2234 pstats = bnx2x_sp(bp, port_stats);
2235 /* reset old bmac stats */
2236 memset(&(pstats->mac_stx[0]), 0,
2237 sizeof(struct mac_stx));
2238 }
2239 if ((bp->state == BNX2X_STATE_OPEN) ||
2240 (bp->state == BNX2X_STATE_DISABLED))
2241 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2242 }
2243
c18487ee
YR
2244 /* indicate link status */
2245 bnx2x_link_report(bp);
34f80b04
EG
2246
2247 if (IS_E1HMF(bp)) {
2248 int func;
2249
2250 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251 if (vn == BP_E1HVN(bp))
2252 continue;
2253
2254 func = ((vn << 1) | BP_PORT(bp));
2255
2256 /* Set the attention towards other drivers
2257 on the same port */
2258 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2260 }
2261 }
2262
2263 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264 struct cmng_struct_per_port m_cmng_port;
2265 u32 wsum;
2266 int port = BP_PORT(bp);
2267
2268 /* Init RATE SHAPING and FAIRNESS contexts */
2269 wsum = bnx2x_calc_vn_wsum(bp);
2270 bnx2x_init_port_minmax(bp, (int)wsum,
2271 bp->link_vars.line_speed,
2272 &m_cmng_port);
2273 if (IS_E1HMF(bp))
2274 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276 wsum, bp->link_vars.line_speed,
2277 &m_cmng_port);
2278 }
c18487ee 2279}
a2fbb9ea 2280
c18487ee
YR
2281static void bnx2x__link_status_update(struct bnx2x *bp)
2282{
2283 if (bp->state != BNX2X_STATE_OPEN)
2284 return;
a2fbb9ea 2285
c18487ee 2286 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2287
bb2a0f7a
YG
2288 if (bp->link_vars.link_up)
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290 else
2291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2292
c18487ee
YR
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
a2fbb9ea 2295}
a2fbb9ea 2296
34f80b04
EG
2297static void bnx2x_pmf_update(struct bnx2x *bp)
2298{
2299 int port = BP_PORT(bp);
2300 u32 val;
2301
2302 bp->port.pmf = 1;
2303 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2304
2305 /* enable nig attention */
2306 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2309
2310 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2311}
2312
c18487ee 2313/* end of Link */
a2fbb9ea
ET
2314
2315/* slow path */
2316
2317/*
2318 * General service functions
2319 */
2320
2321/* the slow path queue is odd since completions arrive on the fastpath ring */
2322static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323 u32 data_hi, u32 data_lo, int common)
2324{
34f80b04 2325 int func = BP_FUNC(bp);
a2fbb9ea 2326
34f80b04
EG
2327 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2329 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2332
2333#ifdef BNX2X_STOP_ON_ERROR
2334 if (unlikely(bp->panic))
2335 return -EIO;
2336#endif
2337
34f80b04 2338 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2339
2340 if (!bp->spq_left) {
2341 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2342 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2343 bnx2x_panic();
2344 return -EBUSY;
2345 }
f1410647 2346
a2fbb9ea
ET
 2347 /* CID needs the port number to be encoded in it */
2348 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2350 HW_CID(bp, cid)));
2351 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2352 if (common)
2353 bp->spq_prod_bd->hdr.type |=
2354 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2355
2356 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2358
2359 bp->spq_left--;
2360
2361 if (bp->spq_prod_bd == bp->spq_last_bd) {
2362 bp->spq_prod_bd = bp->spq;
2363 bp->spq_prod_idx = 0;
2364 DP(NETIF_MSG_TIMER, "end of spq\n");
2365
2366 } else {
2367 bp->spq_prod_bd++;
2368 bp->spq_prod_idx++;
2369 }
2370
34f80b04 2371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2372 bp->spq_prod_idx);
2373
34f80b04 2374 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2375 return 0;
2376}
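/*
 * Editor's note: bnx2x_sp_post() is a single-producer ring protected
 * by spq_lock; the producer wraps from spq_last_bd back to bp->spq
 * and the new index is published through the XSTORM doorbell offset.
 * A hypothetical ramrod post (this is the exact call
 * bnx2x_storm_stats_post() makes later in this file):
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 */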
2377
2378/* acquire split MCP access lock register */
4a37fb66 2379static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2380{
a2fbb9ea 2381 u32 i, j, val;
34f80b04 2382 int rc = 0;
a2fbb9ea
ET
2383
2384 might_sleep();
2385 i = 100;
2386 for (j = 0; j < i*10; j++) {
2387 val = (1UL << 31);
2388 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390 if (val & (1L << 31))
2391 break;
2392
2393 msleep(5);
2394 }
a2fbb9ea 2395 if (!(val & (1L << 31))) {
19680c48 2396 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2397 rc = -EBUSY;
2398 }
2399
2400 return rc;
2401}
2402
4a37fb66
YG
2403/* release split MCP access lock register */
2404static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2405{
2406 u32 val = 0;
2407
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409}
2410
2411static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2412{
2413 struct host_def_status_block *def_sb = bp->def_status_blk;
2414 u16 rc = 0;
2415
2416 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2417 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2419 rc |= 1;
2420 }
2421 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2423 rc |= 2;
2424 }
2425 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2427 rc |= 4;
2428 }
2429 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2431 rc |= 8;
2432 }
2433 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2435 rc |= 16;
2436 }
2437 return rc;
2438}
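/*
 * Editor's note: the value returned by bnx2x_update_dsb_idx() is a
 * bitmask of which default status block indices have moved:
 *	bit 0 - attention bits		bit 1 - CSTORM
 *	bit 2 - USTORM			bit 3 - XSTORM
 *	bit 4 - TSTORM
 * bnx2x_sp_task() below only acts on bit 0 (HW attentions) and
 * bit 1 (CStorm events).
 */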
2439
2440/*
2441 * slow path service functions
2442 */
2443
2444static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2445{
34f80b04
EG
2446 int port = BP_PORT(bp);
2447 int func = BP_FUNC(bp);
2448 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
a2fbb9ea
ET
2449 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2451 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452 NIG_REG_MASK_INTERRUPT_PORT0;
a2fbb9ea
ET
2453
2454 if (~bp->aeu_mask & (asserted & 0xff))
2455 BNX2X_ERR("IGU ERROR\n");
2456 if (bp->attn_state & asserted)
2457 BNX2X_ERR("IGU ERROR\n");
2458
2459 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2460 bp->aeu_mask, asserted);
2461 bp->aeu_mask &= ~(asserted & 0xff);
2462 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2463
2464 REG_WR(bp, aeu_addr, bp->aeu_mask);
2465
2466 bp->attn_state |= asserted;
2467
2468 if (asserted & ATTN_HARD_WIRED_MASK) {
2469 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2470
877e9aa4
ET
2471 /* save nig interrupt mask */
2472 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2473 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2474
c18487ee 2475 bnx2x_link_attn(bp);
a2fbb9ea
ET
2476
2477 /* handle unicore attn? */
2478 }
2479 if (asserted & ATTN_SW_TIMER_4_FUNC)
2480 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2481
2482 if (asserted & GPIO_2_FUNC)
2483 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2484
2485 if (asserted & GPIO_3_FUNC)
2486 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2487
2488 if (asserted & GPIO_4_FUNC)
2489 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2490
2491 if (port == 0) {
2492 if (asserted & ATTN_GENERAL_ATTN_1) {
2493 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2494 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2495 }
2496 if (asserted & ATTN_GENERAL_ATTN_2) {
2497 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2498 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2499 }
2500 if (asserted & ATTN_GENERAL_ATTN_3) {
2501 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2502 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2503 }
2504 } else {
2505 if (asserted & ATTN_GENERAL_ATTN_4) {
2506 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2508 }
2509 if (asserted & ATTN_GENERAL_ATTN_5) {
2510 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2511 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2512 }
2513 if (asserted & ATTN_GENERAL_ATTN_6) {
2514 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2515 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2516 }
2517 }
2518
2519 } /* if hardwired */
2520
2521 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2522 asserted, BAR_IGU_INTMEM + igu_addr);
2523 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2524
2525 /* now set back the mask */
2526 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2527 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2528}
2529
877e9aa4 2530static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2531{
34f80b04 2532 int port = BP_PORT(bp);
877e9aa4
ET
2533 int reg_offset;
2534 u32 val;
2535
34f80b04
EG
2536 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2537 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2538
34f80b04 2539 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2540
2541 val = REG_RD(bp, reg_offset);
2542 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2543 REG_WR(bp, reg_offset, val);
2544
2545 BNX2X_ERR("SPIO5 hw attention\n");
2546
34f80b04 2547 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
877e9aa4
ET
2548 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2549 /* Fan failure attention */
2550
 2551 /* The PHY reset is controlled by GPIO 1 */
2552 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2553 MISC_REGISTERS_GPIO_OUTPUT_LOW);
 2554 /* Low power mode is controlled by GPIO 2 */
2555 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2556 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2557 /* mark the failure */
c18487ee 2558 bp->link_params.ext_phy_config &=
877e9aa4 2559 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2560 bp->link_params.ext_phy_config |=
877e9aa4
ET
2561 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2562 SHMEM_WR(bp,
2563 dev_info.port_hw_config[port].
2564 external_phy_config,
c18487ee 2565 bp->link_params.ext_phy_config);
877e9aa4
ET
2566 /* log the failure */
2567 printk(KERN_ERR PFX "Fan Failure on Network"
2568 " Controller %s has caused the driver to"
2569 " shutdown the card to prevent permanent"
2570 " damage. Please contact Dell Support for"
2571 " assistance\n", bp->dev->name);
2572 break;
2573
2574 default:
2575 break;
2576 }
2577 }
34f80b04
EG
2578
2579 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2580
2581 val = REG_RD(bp, reg_offset);
2582 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2583 REG_WR(bp, reg_offset, val);
2584
2585 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2586 (attn & HW_INTERRUT_ASSERT_SET_0));
2587 bnx2x_panic();
2588 }
877e9aa4
ET
2589}
2590
2591static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2592{
2593 u32 val;
2594
2595 if (attn & BNX2X_DOORQ_ASSERT) {
2596
2597 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2598 BNX2X_ERR("DB hw attention 0x%x\n", val);
2599 /* DORQ discard attention */
2600 if (val & 0x2)
2601 BNX2X_ERR("FATAL error from DORQ\n");
2602 }
34f80b04
EG
2603
2604 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2605
2606 int port = BP_PORT(bp);
2607 int reg_offset;
2608
2609 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2610 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2611
2612 val = REG_RD(bp, reg_offset);
2613 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2614 REG_WR(bp, reg_offset, val);
2615
2616 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2617 (attn & HW_INTERRUT_ASSERT_SET_1));
2618 bnx2x_panic();
2619 }
877e9aa4
ET
2620}
2621
2622static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2623{
2624 u32 val;
2625
2626 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2627
2628 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2629 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2630 /* CFC error attention */
2631 if (val & 0x2)
2632 BNX2X_ERR("FATAL error from CFC\n");
2633 }
2634
2635 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2636
2637 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2638 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2639 /* RQ_USDMDP_FIFO_OVERFLOW */
2640 if (val & 0x18000)
2641 BNX2X_ERR("FATAL error from PXP\n");
2642 }
34f80b04
EG
2643
2644 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2645
2646 int port = BP_PORT(bp);
2647 int reg_offset;
2648
2649 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2650 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2651
2652 val = REG_RD(bp, reg_offset);
2653 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2654 REG_WR(bp, reg_offset, val);
2655
2656 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2657 (attn & HW_INTERRUT_ASSERT_SET_2));
2658 bnx2x_panic();
2659 }
877e9aa4
ET
2660}
2661
2662static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2663{
34f80b04
EG
2664 u32 val;
2665
877e9aa4
ET
2666 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2667
34f80b04
EG
2668 if (attn & BNX2X_PMF_LINK_ASSERT) {
2669 int func = BP_FUNC(bp);
2670
2671 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2672 bnx2x__link_status_update(bp);
2673 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2674 DRV_STATUS_PMF)
2675 bnx2x_pmf_update(bp);
2676
2677 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2678
2679 BNX2X_ERR("MC assert!\n");
2680 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2681 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2682 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2684 bnx2x_panic();
2685
2686 } else if (attn & BNX2X_MCP_ASSERT) {
2687
2688 BNX2X_ERR("MCP assert!\n");
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2690 bnx2x_fw_dump(bp);
877e9aa4
ET
2691
2692 } else
2693 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2694 }
2695
2696 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2697 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2698 if (attn & BNX2X_GRC_TIMEOUT) {
2699 val = CHIP_IS_E1H(bp) ?
2700 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2701 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2702 }
2703 if (attn & BNX2X_GRC_RSV) {
2704 val = CHIP_IS_E1H(bp) ?
2705 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2706 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2707 }
877e9aa4 2708 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2709 }
2710}
2711
2712static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2713{
a2fbb9ea
ET
2714 struct attn_route attn;
2715 struct attn_route group_mask;
34f80b04 2716 int port = BP_PORT(bp);
877e9aa4 2717 int index;
a2fbb9ea
ET
2718 u32 reg_addr;
2719 u32 val;
2720
2721 /* need to take HW lock because MCP or other port might also
2722 try to handle this event */
4a37fb66 2723 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2724
2725 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2726 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2727 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2728 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2729 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2730 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2731
2732 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2733 if (deasserted & (1 << index)) {
2734 group_mask = bp->attn_group[index];
2735
34f80b04
EG
2736 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2737 index, group_mask.sig[0], group_mask.sig[1],
2738 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2739
877e9aa4
ET
2740 bnx2x_attn_int_deasserted3(bp,
2741 attn.sig[3] & group_mask.sig[3]);
2742 bnx2x_attn_int_deasserted1(bp,
2743 attn.sig[1] & group_mask.sig[1]);
2744 bnx2x_attn_int_deasserted2(bp,
2745 attn.sig[2] & group_mask.sig[2]);
2746 bnx2x_attn_int_deasserted0(bp,
2747 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2748
a2fbb9ea
ET
2749 if ((attn.sig[0] & group_mask.sig[0] &
2750 HW_PRTY_ASSERT_SET_0) ||
2751 (attn.sig[1] & group_mask.sig[1] &
2752 HW_PRTY_ASSERT_SET_1) ||
2753 (attn.sig[2] & group_mask.sig[2] &
2754 HW_PRTY_ASSERT_SET_2))
877e9aa4 2755 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2756 }
2757 }
2758
4a37fb66 2759 bnx2x_release_alr(bp);
a2fbb9ea 2760
34f80b04 2761 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
2762
2763 val = ~deasserted;
34f80b04 2764/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
a2fbb9ea
ET
2765 val, BAR_IGU_INTMEM + reg_addr); */
2766 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2767
2768 if (bp->aeu_mask & (deasserted & 0xff))
34f80b04 2769 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea 2770 if (~bp->attn_state & deasserted)
34f80b04 2771 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea
ET
2772
2773 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2774 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2775
2776 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2777 bp->aeu_mask |= (deasserted & 0xff);
2778
2779 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2780 REG_WR(bp, reg_addr, bp->aeu_mask);
2781
2782 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2783 bp->attn_state &= ~deasserted;
2784 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2785}
2786
2787static void bnx2x_attn_int(struct bnx2x *bp)
2788{
2789 /* read local copy of bits */
2790 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2791 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2792 u32 attn_state = bp->attn_state;
2793
2794 /* look for changed bits */
2795 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2796 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2797
2798 DP(NETIF_MSG_HW,
2799 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2800 attn_bits, attn_ack, asserted, deasserted);
2801
2802 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2803 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2804
2805 /* handle bits that were raised */
2806 if (asserted)
2807 bnx2x_attn_int_asserted(bp, asserted);
2808
2809 if (deasserted)
2810 bnx2x_attn_int_deasserted(bp, deasserted);
2811}
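/*
 * Editor's worked example: with attn_bits = 0x1, attn_ack = 0x2 and
 * attn_state = 0x2, asserted = 0x1 & ~0x2 & ~0x2 = 0x1 (bit 0 newly
 * raised, not yet acked) and deasserted = ~0x1 & 0x2 & 0x2 = 0x2
 * (bit 1 dropped while still acked), so both handlers run.  The
 * sanity check ~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)
 * evaluates to 0 here, i.e. no "BAD attention state".
 */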
2812
2813static void bnx2x_sp_task(struct work_struct *work)
2814{
2815 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2816 u16 status;
2817
34f80b04 2818
a2fbb9ea
ET
2819 /* Return here if interrupt is disabled */
2820 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2821 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2822 return;
2823 }
2824
2825 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2826/* if (status == 0) */
2827/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2828
34f80b04 2829 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2830
877e9aa4
ET
2831 /* HW attentions */
2832 if (status & 0x1)
a2fbb9ea 2833 bnx2x_attn_int(bp);
a2fbb9ea 2834
bb2a0f7a
YG
2835 /* CStorm events: query_stats, port delete ramrod */
2836 if (status & 0x2)
2837 bp->stats_pending = 0;
2838
a2fbb9ea
ET
2839 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2840 IGU_INT_NOP, 1);
2841 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2842 IGU_INT_NOP, 1);
2843 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2844 IGU_INT_NOP, 1);
2845 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2846 IGU_INT_NOP, 1);
2847 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2848 IGU_INT_ENABLE, 1);
877e9aa4 2849
a2fbb9ea
ET
2850}
2851
2852static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2853{
2854 struct net_device *dev = dev_instance;
2855 struct bnx2x *bp = netdev_priv(dev);
2856
2857 /* Return here if interrupt is disabled */
2858 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2859 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2860 return IRQ_HANDLED;
2861 }
2862
877e9aa4 2863 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2864
2865#ifdef BNX2X_STOP_ON_ERROR
2866 if (unlikely(bp->panic))
2867 return IRQ_HANDLED;
2868#endif
2869
2870 schedule_work(&bp->sp_task);
2871
2872 return IRQ_HANDLED;
2873}
2874
2875/* end of slow path */
2876
2877/* Statistics */
2878
2879/****************************************************************************
2880* Macros
2881****************************************************************************/
2882
a2fbb9ea
ET
2883/* sum[hi:lo] += add[hi:lo] */
2884#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2885 do { \
2886 s_lo += a_lo; \
 2887 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2888 } while (0)
2889
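/*
 * Editor's worked example: ADD_64 emulates a 64-bit add on split
 * 32-bit halves.  With sum = 0x00000001:0xffffffff and
 * add = 0x00000000:0x00000001, the low word wraps to 0, the
 * (s_lo < a_lo) test detects the wrap and carries 1 into the high
 * word: result 0x00000002:0x00000000.  The parentheses around the
 * conditional are essential - without them '+' binds tighter than
 * '?:' and the whole right-hand side collapses to 1 or 0.
 */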
2890/* difference = minuend - subtrahend */
2891#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2892 do { \
bb2a0f7a
YG
2893 if (m_lo < s_lo) { \
2894 /* underflow */ \
a2fbb9ea 2895 d_hi = m_hi - s_hi; \
bb2a0f7a
YG
2896 if (d_hi > 0) { \
2897 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2898 d_hi--; \
2899 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a
YG
2900 } else { \
2901 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2902 d_hi = 0; \
2903 d_lo = 0; \
2904 } \
bb2a0f7a
YG
2905 } else { \
2906 /* m_lo >= s_lo */ \
a2fbb9ea 2907 if (m_hi < s_hi) { \
bb2a0f7a
YG
2908 d_hi = 0; \
2909 d_lo = 0; \
2910 } else { \
2911 /* m_hi >= s_hi */ \
2912 d_hi = m_hi - s_hi; \
2913 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2914 } \
2915 } \
2916 } while (0)
2917
bb2a0f7a 2918#define UPDATE_STAT64(s, t) \
a2fbb9ea 2919 do { \
bb2a0f7a
YG
2920 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2921 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2922 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2923 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2924 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2925 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2926 } while (0)
2927
bb2a0f7a 2928#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2929 do { \
bb2a0f7a
YG
2930 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2931 diff.lo, new->s##_lo, old->s##_lo); \
2932 ADD_64(estats->t##_hi, diff.hi, \
2933 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2934 } while (0)
2935
2936/* sum[hi:lo] += add */
2937#define ADD_EXTEND_64(s_hi, s_lo, a) \
2938 do { \
2939 s_lo += a; \
2940 s_hi += (s_lo < a) ? 1 : 0; \
2941 } while (0)
2942
bb2a0f7a 2943#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2944 do { \
bb2a0f7a
YG
2945 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2946 pstats->mac_stx[1].s##_lo, \
2947 new->s); \
a2fbb9ea
ET
2948 } while (0)
2949
bb2a0f7a 2950#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2951 do { \
2952 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2953 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2954 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2955 } while (0)
2956
2957#define UPDATE_EXTEND_XSTAT(s, t) \
2958 do { \
2959 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2960 old_xclient->s = le32_to_cpu(xclient->s); \
2961 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2962 } while (0)
2963
2964/*
2965 * General service functions
2966 */
2967
2968static inline long bnx2x_hilo(u32 *hiref)
2969{
2970 u32 lo = *(hiref + 1);
2971#if (BITS_PER_LONG == 64)
2972 u32 hi = *hiref;
2973
2974 return HILO_U64(hi, lo);
2975#else
2976 return lo;
2977#endif
2978}
2979
2980/*
2981 * Init service functions
2982 */
2983
bb2a0f7a
YG
2984static void bnx2x_storm_stats_post(struct bnx2x *bp)
2985{
2986 if (!bp->stats_pending) {
2987 struct eth_query_ramrod_data ramrod_data = {0};
2988 int rc;
2989
2990 ramrod_data.drv_counter = bp->stats_counter++;
2991 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2992 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2993
2994 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2995 ((u32 *)&ramrod_data)[1],
2996 ((u32 *)&ramrod_data)[0], 0);
2997 if (rc == 0) {
 2998 /* the stats ramrod has its own slot on the spq */
2999 bp->spq_left++;
3000 bp->stats_pending = 1;
3001 }
3002 }
3003}
3004
3005static void bnx2x_stats_init(struct bnx2x *bp)
3006{
3007 int port = BP_PORT(bp);
3008
3009 bp->executer_idx = 0;
3010 bp->stats_counter = 0;
3011
3012 /* port stats */
3013 if (!BP_NOMCP(bp))
3014 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3015 else
3016 bp->port.port_stx = 0;
3017 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3018
3019 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3020 bp->port.old_nig_stats.brb_discard =
3021 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3022 bp->port.old_nig_stats.brb_truncate =
3023 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3024 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3025 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3026 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3027 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3028
3029 /* function stats */
3030 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3031 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3032 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3033 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3034
3035 bp->stats_state = STATS_STATE_DISABLED;
3036 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3037 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3038}
3039
3040static void bnx2x_hw_stats_post(struct bnx2x *bp)
3041{
3042 struct dmae_command *dmae = &bp->stats_dmae;
3043 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3044
3045 *stats_comp = DMAE_COMP_VAL;
3046
3047 /* loader */
3048 if (bp->executer_idx) {
3049 int loader_idx = PMF_DMAE_C(bp);
3050
3051 memset(dmae, 0, sizeof(struct dmae_command));
3052
3053 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3054 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3055 DMAE_CMD_DST_RESET |
3056#ifdef __BIG_ENDIAN
3057 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3058#else
3059 DMAE_CMD_ENDIANITY_DW_SWAP |
3060#endif
3061 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3062 DMAE_CMD_PORT_0) |
3063 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3064 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3065 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3066 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3067 sizeof(struct dmae_command) *
3068 (loader_idx + 1)) >> 2;
3069 dmae->dst_addr_hi = 0;
3070 dmae->len = sizeof(struct dmae_command) >> 2;
3071 if (CHIP_IS_E1(bp))
3072 dmae->len--;
3073 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3074 dmae->comp_addr_hi = 0;
3075 dmae->comp_val = 1;
3076
3077 *stats_comp = 0;
3078 bnx2x_post_dmae(bp, dmae, loader_idx);
3079
3080 } else if (bp->func_stx) {
3081 *stats_comp = 0;
3082 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3083 }
3084}
3085
3086static int bnx2x_stats_comp(struct bnx2x *bp)
3087{
3088 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3089 int cnt = 10;
3090
3091 might_sleep();
3092 while (*stats_comp != DMAE_COMP_VAL) {
3093 msleep(1);
3094 if (!cnt) {
 3095 BNX2X_ERR("timed out waiting for stats to finish\n");
3096 break;
3097 }
3098 cnt--;
3099 }
3100 return 1;
3101}
3102
3103/*
3104 * Statistics service functions
3105 */
3106
3107static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3108{
3109 struct dmae_command *dmae;
3110 u32 opcode;
3111 int loader_idx = PMF_DMAE_C(bp);
3112 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3113
3114 /* sanity */
3115 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3116 BNX2X_ERR("BUG!\n");
3117 return;
3118 }
3119
3120 bp->executer_idx = 0;
3121
3122 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3123 DMAE_CMD_C_ENABLE |
3124 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3125#ifdef __BIG_ENDIAN
3126 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3127#else
3128 DMAE_CMD_ENDIANITY_DW_SWAP |
3129#endif
3130 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3131 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3132
3133 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3134 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3135 dmae->src_addr_lo = bp->port.port_stx >> 2;
3136 dmae->src_addr_hi = 0;
3137 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3138 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3139 dmae->len = DMAE_LEN32_RD_MAX;
3140 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3141 dmae->comp_addr_hi = 0;
3142 dmae->comp_val = 1;
3143
3144 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3145 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3146 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3147 dmae->src_addr_hi = 0;
7a9b2557
VZ
3148 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3149 DMAE_LEN32_RD_MAX * 4);
3150 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3151 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3152 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3153 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3154 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3155 dmae->comp_val = DMAE_COMP_VAL;
3156
3157 *stats_comp = 0;
3158 bnx2x_hw_stats_post(bp);
3159 bnx2x_stats_comp(bp);
3160}
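/*
 * Editor's note: the PMF update above fetches the port statistics in
 * two DMAE commands because one command is capped at
 * DMAE_LEN32_RD_MAX 32-bit words; the second command resumes at
 * port_stx + DMAE_LEN32_RD_MAX words and completes into stats_comp,
 * which bnx2x_stats_comp() then polls (up to ~10 ms) for
 * DMAE_COMP_VAL.
 */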
3161
3162static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3163{
3164 struct dmae_command *dmae;
34f80b04 3165 int port = BP_PORT(bp);
bb2a0f7a 3166 int vn = BP_E1HVN(bp);
a2fbb9ea 3167 u32 opcode;
bb2a0f7a 3168 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3169 u32 mac_addr;
bb2a0f7a
YG
3170 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3171
3172 /* sanity */
3173 if (!bp->link_vars.link_up || !bp->port.pmf) {
3174 BNX2X_ERR("BUG!\n");
3175 return;
3176 }
a2fbb9ea
ET
3177
3178 bp->executer_idx = 0;
bb2a0f7a
YG
3179
3180 /* MCP */
3181 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3182 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3183 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3184#ifdef __BIG_ENDIAN
bb2a0f7a 3185 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3186#else
bb2a0f7a 3187 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3188#endif
bb2a0f7a
YG
3189 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3190 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3191
bb2a0f7a 3192 if (bp->port.port_stx) {
a2fbb9ea
ET
3193
3194 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3195 dmae->opcode = opcode;
bb2a0f7a
YG
3196 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3197 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3198 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3199 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3200 dmae->len = sizeof(struct host_port_stats) >> 2;
3201 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3202 dmae->comp_addr_hi = 0;
3203 dmae->comp_val = 1;
a2fbb9ea
ET
3204 }
3205
bb2a0f7a
YG
3206 if (bp->func_stx) {
3207
3208 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3209 dmae->opcode = opcode;
3210 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3211 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3212 dmae->dst_addr_lo = bp->func_stx >> 2;
3213 dmae->dst_addr_hi = 0;
3214 dmae->len = sizeof(struct host_func_stats) >> 2;
3215 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3216 dmae->comp_addr_hi = 0;
3217 dmae->comp_val = 1;
a2fbb9ea
ET
3218 }
3219
bb2a0f7a 3220 /* MAC */
a2fbb9ea
ET
3221 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3222 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3224#ifdef __BIG_ENDIAN
3225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3226#else
3227 DMAE_CMD_ENDIANITY_DW_SWAP |
3228#endif
bb2a0f7a
YG
3229 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3230 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3231
c18487ee 3232 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3233
3234 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3235 NIG_REG_INGRESS_BMAC0_MEM);
3236
3237 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3238 BIGMAC_REGISTER_TX_STAT_GTBYT */
3239 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3240 dmae->opcode = opcode;
3241 dmae->src_addr_lo = (mac_addr +
3242 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3243 dmae->src_addr_hi = 0;
3244 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3245 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3246 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3247 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3248 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249 dmae->comp_addr_hi = 0;
3250 dmae->comp_val = 1;
3251
3252 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3253 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3255 dmae->opcode = opcode;
3256 dmae->src_addr_lo = (mac_addr +
3257 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3258 dmae->src_addr_hi = 0;
3259 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3260 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3261 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3262 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3263 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3264 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3265 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3266 dmae->comp_addr_hi = 0;
3267 dmae->comp_val = 1;
3268
c18487ee 3269 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3270
3271 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3272
3273 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275 dmae->opcode = opcode;
3276 dmae->src_addr_lo = (mac_addr +
3277 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3278 dmae->src_addr_hi = 0;
3279 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3280 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3281 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3282 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3283 dmae->comp_addr_hi = 0;
3284 dmae->comp_val = 1;
3285
3286 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3287 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3288 dmae->opcode = opcode;
3289 dmae->src_addr_lo = (mac_addr +
3290 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3291 dmae->src_addr_hi = 0;
3292 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3293 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3294 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3295 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3296 dmae->len = 1;
3297 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3298 dmae->comp_addr_hi = 0;
3299 dmae->comp_val = 1;
3300
3301 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3302 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3303 dmae->opcode = opcode;
3304 dmae->src_addr_lo = (mac_addr +
3305 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3306 dmae->src_addr_hi = 0;
3307 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3308 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3309 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3310 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3311 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3314 dmae->comp_val = 1;
3315 }
3316
3317 /* NIG */
bb2a0f7a
YG
3318 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3319 dmae->opcode = opcode;
3320 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3321 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3322 dmae->src_addr_hi = 0;
3323 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3324 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3325 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3326 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3327 dmae->comp_addr_hi = 0;
3328 dmae->comp_val = 1;
3329
3330 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331 dmae->opcode = opcode;
3332 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3333 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3334 dmae->src_addr_hi = 0;
3335 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3336 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3337 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3338 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339 dmae->len = (2*sizeof(u32)) >> 2;
3340 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3341 dmae->comp_addr_hi = 0;
3342 dmae->comp_val = 1;
3343
a2fbb9ea
ET
3344 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3345 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3346 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3347 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3348#ifdef __BIG_ENDIAN
3349 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3350#else
3351 DMAE_CMD_ENDIANITY_DW_SWAP |
3352#endif
bb2a0f7a
YG
3353 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3354 (vn << DMAE_CMD_E1HVN_SHIFT));
3355 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3356 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3357 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3358 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3359 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3361 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362 dmae->len = (2*sizeof(u32)) >> 2;
3363 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3364 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3365 dmae->comp_val = DMAE_COMP_VAL;
3366
3367 *stats_comp = 0;
a2fbb9ea
ET
3368}
3369
bb2a0f7a 3370static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3371{
bb2a0f7a
YG
3372 struct dmae_command *dmae = &bp->stats_dmae;
3373 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3374
bb2a0f7a
YG
3375 /* sanity */
3376 if (!bp->func_stx) {
3377 BNX2X_ERR("BUG!\n");
3378 return;
3379 }
a2fbb9ea 3380
bb2a0f7a
YG
3381 bp->executer_idx = 0;
3382 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3383
bb2a0f7a
YG
3384 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3385 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3386 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3387#ifdef __BIG_ENDIAN
3388 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3389#else
3390 DMAE_CMD_ENDIANITY_DW_SWAP |
3391#endif
3392 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3393 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3394 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3395 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3396 dmae->dst_addr_lo = bp->func_stx >> 2;
3397 dmae->dst_addr_hi = 0;
3398 dmae->len = sizeof(struct host_func_stats) >> 2;
3399 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3402
bb2a0f7a
YG
3403 *stats_comp = 0;
3404}
a2fbb9ea 3405
bb2a0f7a
YG
3406static void bnx2x_stats_start(struct bnx2x *bp)
3407{
3408 if (bp->port.pmf)
3409 bnx2x_port_stats_init(bp);
3410
3411 else if (bp->func_stx)
3412 bnx2x_func_stats_init(bp);
3413
3414 bnx2x_hw_stats_post(bp);
3415 bnx2x_storm_stats_post(bp);
3416}
3417
3418static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3419{
3420 bnx2x_stats_comp(bp);
3421 bnx2x_stats_pmf_update(bp);
3422 bnx2x_stats_start(bp);
3423}
3424
3425static void bnx2x_stats_restart(struct bnx2x *bp)
3426{
3427 bnx2x_stats_comp(bp);
3428 bnx2x_stats_start(bp);
3429}
3430
3431static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3432{
3433 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3434 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3435 struct regpair diff;
3436
3437 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3438 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3439 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3440 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3441 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3442 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3443 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3444 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3445 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3446 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3447 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3448 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3449 UPDATE_STAT64(tx_stat_gt127,
3450 tx_stat_etherstatspkts65octetsto127octets);
3451 UPDATE_STAT64(tx_stat_gt255,
3452 tx_stat_etherstatspkts128octetsto255octets);
3453 UPDATE_STAT64(tx_stat_gt511,
3454 tx_stat_etherstatspkts256octetsto511octets);
3455 UPDATE_STAT64(tx_stat_gt1023,
3456 tx_stat_etherstatspkts512octetsto1023octets);
3457 UPDATE_STAT64(tx_stat_gt1518,
3458 tx_stat_etherstatspkts1024octetsto1522octets);
3459 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3460 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3461 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3462 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3463 UPDATE_STAT64(tx_stat_gterr,
3464 tx_stat_dot3statsinternalmactransmiterrors);
3465 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3466}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}
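
/*
 * The host_port_stats_start/_end pair updated above acts like a tiny
 * sequence lock: _end is bumped and copied into _start just before the
 * block is DMAEd out, so a consumer that reads _start != _end knows it
 * caught a partially written buffer and should retry.  (Inferred from
 * the update pattern here; the reader side lives in firmware/tools.)
 */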

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   "  tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   "  xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}
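
/*
 * Why the counter checks at the top of bnx2x_storm_stats_update() work:
 * bp->stats_counter numbers the statistics queries the driver posts to
 * the storms, and each storm stamps the snapshot it returns with the
 * query number it served.  A snapshot is folded into the totals only
 * when stamp + 1 == bp->stats_counter, i.e. only if it answers the most
 * recently posted query; anything else is a stale buffer from an earlier
 * cycle and is skipped (returning -1/-2 keeps 'update' false upstream).
 */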

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}
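
/*
 * bnx2x_hilo(), defined earlier in this driver, folds one of the _hi/_lo
 * pairs kept in bnx2x_eth_stats into a value for net_device_stats.  In
 * essence (paraphrased from its definition):
 *
 *	static inline long bnx2x_hilo(u32 *hiref)
 *	{
 *		u32 lo = *(hiref + 1);
 *	#if (BITS_PER_LONG == 64)
 *		return HILO_U64(*hiref, lo);
 *	#else
 *		return lo;
 *	#endif
 *	}
 *
 * so 64-bit kernels report the full counter while 32-bit kernels can
 * only expose the low word.  This relies on each _lo field sitting
 * immediately after its _hi field in the structure layout.
 */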

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
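
/*
 * Watchdog logic above: bp->stats_pending is nonzero while a statistics
 * query is outstanding (it is presumably raised by the posting path and
 * cleared on completion, both outside this function).  Every update tick
 * that still finds no valid snapshot increments it, and three
 * consecutive misses are treated as a dead statistics channel and
 * escalate to bnx2x_panic().
 */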

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
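
/*
 * The two completion targets above implement DMAE command chaining: when
 * both the port and the function statistics blocks must be flushed, the
 * first command completes into GRC (DMAE_CMD_C_DST_GRC) by writing 1 to
 * dmae_reg_go_c[loader_idx], which kicks the next queued command, and
 * only the last command in the chain completes into host memory
 * (DMAE_CMD_C_DST_PCI) where *stats_comp is polled.  A lone command has
 * nothing to kick, so it completes straight to PCI.
 */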

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
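
/*
 * All statistics activity is driven through this 2x4 table: the handler
 * looks up (current state, event), runs the action and adopts the listed
 * next state.  For example, while the interface is up the periodic timer
 * feeds in updates:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * which from STATS_STATE_ENABLED dispatches bnx2x_stats_update() and
 * stays in ENABLED, while STATS_EVENT_STOP from the same state runs
 * bnx2x_stats_stop() and drops the machine back to DISABLED.
 */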

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
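
/*
 * Heartbeat arithmetic: the driver writes a masked sequence number into
 * shared memory every timer tick and the management CPU (MCP) echoes it.
 * Since both counters wrap at the SEQ masks, the comparison is modular:
 * when the echo is still pending, mcp_pulse lags by exactly one, so
 * drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK) holds even right
 * across a wrap of the sequence space and no error is logged; only a
 * drift of two or more pulses trips the "lost a heartbeat" message.
 */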

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_def_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
			  struct host_status_block *sb, dma_addr_t mapping)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
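
/*
 * Status-block wiring in a nutshell: each fast-path queue owns one
 * DMA-able host_status_block; the paired REG_WR calls publish its
 * physical address (low/high dwords) into the USTORM/CSTORM internal
 * memories, the REG_WR8 records the owning PCI function, and the
 * HC_DISABLE loops start every index with host coalescing disabled until
 * bnx2x_update_coalesce() below programs real timeouts.  The closing
 * bnx2x_ack_sb() enables the IGU interrupt for this status block.
 */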

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->def_att_idx = 0;
	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				   MISC_REG_AEU_MASK_ATTN_FUNC_0));

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	bp->def_u_idx = 0;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	bp->def_c_idx = 0;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	bp->def_t_idx = 0;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	bp->def_x_idx = 0;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
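
/*
 * The ATTN section caches the four AEU signal-group registers for every
 * dynamic attention group, presumably so the slow-path interrupt handler
 * can classify asserted attentions without re-reading the AEU each time;
 * the HC_REG_ATTN_MSG*_ADDR_L/+4 pair tells the HC where to write the
 * attention bits, and OR-ing sb_id into HC_REG_ATTN_NUM_P* selects the
 * status block that carries them for this port.
 */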

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}
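
/*
 * Units: rx_ticks/tx_ticks are kept in microseconds (they are what
 * ethtool coalescing settings end up as), while the HC timeout register
 * is an 8-bit count of coarser hardware ticks - the /12 conversion
 * implies a ~12us HC tick on this device.  A value of 0 disables
 * coalescing for that index via the HC_DISABLE word instead, e.g.
 * rx_ticks = 0 means an interrupt for every completed RX CQE.
 */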

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod, cqe_ring_prod = 0;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
				struct bnx2x_fastpath *fp = &bp->fp[j];

				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp,
					      ETH_MAX_AGGREGATION_QUEUES_E1H);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
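
/*
 * Page-chaining arithmetic used above: every ring is a list of
 * BCM_PAGE_SIZE pages whose tail elements are reserved as "next page"
 * pointers rather than real descriptors.  For page i (1-based),
 * RX_DESC_CNT * i - 2 (or RCQ_DESC_CNT * i - 1 for the wider CQE) picks
 * that page's link element, and the (i % NUM_..._RINGS) wrap makes the
 * last page point back at the first, closing the ring.  The ring-index
 * macros (NEXT_RX_IDX and friends) skip those reserved slots, which is
 * why ring_prod can run ahead of i in the fill loops - the WARN_ON
 * guards exactly that invariant.
 */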

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
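
/*
 * RSS indirection: the TSTORM hashes each incoming flow to a slot in this
 * table and the byte stored there names the absorbing queue.  Filling it
 * with i % bp->num_queues round-robins the slots across the active
 * queues; with num_queues = 4 the table reads 0 1 2 3 0 1 2 3 ... so the
 * hash buckets are spread evenly.
 */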

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
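
/*
 * max_sges_for_packet, worked through (assuming 4K BCM pages and
 * PAGES_PER_SGE = 2, i.e. PAGES_PER_SGE_SHIFT = 1): for a 9000-byte MTU,
 * BCM_PAGE_ALIGN(mtu) >> BCM_PAGE_SHIFT = 3 pages; rounding 3 up to a
 * multiple of PAGES_PER_SGE gives 4, and >> PAGES_PER_SGE_SHIFT yields
 * 2 SGEs.  In words: pages needed for a worst-case packet, rounded up to
 * whole SGEs, where one SGE spans PAGES_PER_SGE pages.
 */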

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
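
/*
 * Each drop_all/accept_all field is a per-function bitmap and
 * mask = 1 << BP_L_ID(bp) flips only this function's bit, so the several
 * PCI functions sharing a port can hold different rx modes at once.
 * BNX2X_RX_MODE_NORMAL sets no unicast/multicast accept_all bits because
 * those frames are still matched against the configured MAC and
 * multicast filters; only broadcast is accepted unconditionally.
 */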

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
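
/*
 * The fall-through above is deliberate and mirrors the MCP load
 * hierarchy: the first driver instance on the chip is granted
 * LOAD_COMMON, the first on a port LOAD_PORT, and everyone else
 * LOAD_FUNCTION.  An instance that owns a wider scope also initializes
 * every narrower one, so LOAD_COMMON runs common + port + function init
 * in order.
 */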

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
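
/*
 * Decompression details: a gzip stream is a 10-byte header (magic
 * 0x1f 0x8b, method byte Z_DEFLATED, flags, mtime, ...), optional fields
 * such as the NUL-terminated original file name that the FNAME loop
 * steps over, a raw deflate body, and a CRC32/length trailer.  Passing a
 * negative window size (-MAX_WBITS) to zlib_inflateInit2() selects raw
 * deflate decoding with no zlib/gzip framing, which is why the header is
 * skipped by hand here (and the trailer simply never consumed).  The
 * output length is converted to dwords (>> 2), apparently because the
 * init code writes the uncompressed image to the chip one 32-bit word
 * at a time.
 */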

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
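
/*
 * How the test counts: each bnx2x_lb_pckt() injects one 16-byte (0x10)
 * debug frame into the NIG loopback path, so the BRB octet counter read
 * over DMAE advances by 0x10 per frame - one frame gives 0x10, and the
 * 10 frames of part 2 plus one more (the "10 + 1" in the comment above)
 * give 11*0x10 = 0xb0.  Holding the parser's CFC search credits at 0
 * keeps the frames queued inside BRB/PRS instead of draining, which is
 * what exercises the internal memories that cannot be read directly;
 * restoring one credit lets the backlog move, after which the packet
 * counts are re-checked and the blocks are reset back to normal
 * operation.
 */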
5057
5058static void enable_blocks_attention(struct bnx2x *bp)
5059{
5060 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5061 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5062 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5063 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5064 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5065 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5066 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5067 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5068 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5069/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5070/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5071 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5072 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5073 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5074/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5075/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5076 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5077 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5078 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5079 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5080/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5081/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
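	/* annotation: 0x480000 below sets bits 19 and 22; the FPGA value
	   0x580000 additionally sets bit 20, i.e. one more PXP2 attention
	   source is masked on FPGA (the individual bit meanings are not
	   spelled out in this file) */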
5082 if (CHIP_REV_IS_FPGA(bp))
5083 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5084 else
5085 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5086 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5087 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5088 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5089/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5090/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5091 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5092 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5093/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5094 	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);	/* bits 3 and 4 masked */
5095}
5096
5097
5098static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5099{
a2fbb9ea 5100 u32 val, i;
a2fbb9ea 5101
34f80b04 5102 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5103
5104 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5105 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5106
5107 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5108 if (CHIP_IS_E1H(bp))
5109 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5110
5111 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5112 msleep(30);
5113 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5114
5115 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5116 if (CHIP_IS_E1(bp)) {
5117 /* enable HW interrupt from PXP on USDM overflow
5118 bit 16 on INT_MASK_0 */
5119 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5120 }
a2fbb9ea 5121
5122 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5123 bnx2x_init_pxp(bp);
5124
5125#ifdef __BIG_ENDIAN
5126 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5127 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5128 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5129 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5130 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5131 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5132
5133/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5134 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5135 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5136 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5137 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5138#endif
5139
5140#ifndef BCM_ISCSI
5141 /* set NIC mode */
5142 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5143#endif
5144
34f80b04 5145 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5146#ifdef BCM_ISCSI
5147 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5148 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5149 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5150#endif
5151
5152 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5153 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5154
5155 	/* let the HW do its magic ... */
5156 msleep(100);
5157 /* finish PXP init */
5158 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5159 if (val != 1) {
5160 BNX2X_ERR("PXP2 CFG failed\n");
5161 return -EBUSY;
5162 }
5163 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5164 if (val != 1) {
5165 BNX2X_ERR("PXP2 RD_INIT failed\n");
5166 return -EBUSY;
5167 }
a2fbb9ea 5168
5169 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5170 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5171
34f80b04 5172 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5173
5174 /* clean the DMAE memory */
5175 bp->dmae_ready = 1;
5176 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5177
5178 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5179 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5180 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5181 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5182
5183 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5184 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5185 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5186 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5187
5188 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5189 /* soft reset pulse */
5190 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5191 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5192
5193#ifdef BCM_ISCSI
34f80b04 5194 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5195#endif
a2fbb9ea 5196
5197 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5198 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5199 if (!CHIP_REV_IS_SLOW(bp)) {
5200 /* enable hw interrupt from doorbell Q */
5201 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5202 }
a2fbb9ea 5203
5204 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5205 if (CHIP_REV_IS_SLOW(bp)) {
5206 /* fix for emulation and FPGA for no pause */
5207 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5208 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5209 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5210 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5211 }
a2fbb9ea 5212
5213 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5214 if (CHIP_IS_E1H(bp))
5215 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5216
5217 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5218 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5219 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5220 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5221
5222 if (CHIP_IS_E1H(bp)) {
5223 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5224 STORM_INTMEM_SIZE_E1H/2);
5225 bnx2x_init_fill(bp,
5226 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5227 0, STORM_INTMEM_SIZE_E1H/2);
5228 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5229 STORM_INTMEM_SIZE_E1H/2);
5230 bnx2x_init_fill(bp,
5231 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5232 0, STORM_INTMEM_SIZE_E1H/2);
5233 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5234 STORM_INTMEM_SIZE_E1H/2);
5235 bnx2x_init_fill(bp,
5236 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5237 0, STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1H/2);
5240 bnx2x_init_fill(bp,
5241 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5242 0, STORM_INTMEM_SIZE_E1H/2);
5243 } else { /* E1 */
5244 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1);
5246 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5247 STORM_INTMEM_SIZE_E1);
5248 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5249 STORM_INTMEM_SIZE_E1);
5250 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
34f80b04 5252 }
a2fbb9ea 5253
5254 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5255 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5256 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5257 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5258
5259 /* sync semi rtc */
5260 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5261 0x80000000);
5262 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5263 0x80000000);
a2fbb9ea 5264
5265 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5266 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5267 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5268
5269 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5270 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5271 REG_WR(bp, i, 0xc0cac01a);
5272 /* TODO: replace with something meaningful */
5273 }
5274 if (CHIP_IS_E1H(bp))
5275 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5276 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5277
5278 if (sizeof(union cdu_context) != 1024)
5279 /* we currently assume that a context is 1024 bytes */
5280 printk(KERN_ALERT PFX "please adjust the size of"
5281 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5282
5283 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
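	/* annotation (an assumption - the field layout is not documented
	   here): the low 12 bits of val carry the context size, 1024,
	   matching the cdu_context check above; (4 << 24) and (0 << 12)
	   fill CDU-specific parameter fields */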
5284 val = (4 << 24) + (0 << 12) + 1024;
5285 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5286 if (CHIP_IS_E1(bp)) {
5287 		/* !!! fix pxp client credit until excel update */
5288 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5289 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5290 }
a2fbb9ea 5291
5292 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5293 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5294
5295 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5296 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5297
5298 /* PXPCS COMMON comes here */
5299 /* Reset PCIE errors for debug */
5300 REG_WR(bp, 0x2814, 0xffffffff);
5301 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5302
5303 /* EMAC0 COMMON comes here */
5304 /* EMAC1 COMMON comes here */
5305 /* DBU COMMON comes here */
5306 /* DBG COMMON comes here */
5307
5308 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5309 if (CHIP_IS_E1H(bp)) {
5310 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5311 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5312 }
5313
5314 if (CHIP_REV_IS_SLOW(bp))
5315 msleep(200);
5316
5317 /* finish CFC init */
5318 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5319 if (val != 1) {
5320 BNX2X_ERR("CFC LL_INIT failed\n");
5321 return -EBUSY;
5322 }
5323 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5324 if (val != 1) {
5325 BNX2X_ERR("CFC AC_INIT failed\n");
5326 return -EBUSY;
5327 }
5328 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5329 if (val != 1) {
5330 BNX2X_ERR("CFC CAM_INIT failed\n");
5331 return -EBUSY;
5332 }
5333 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5334
5335 /* read NIG statistic
5336 to see if this is our first up since powerup */
5337 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5338 val = *bnx2x_sp(bp, wb_data[0]);
5339
5340 /* do internal memory self test */
5341 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5342 BNX2X_ERR("internal mem self test failed\n");
5343 return -EBUSY;
5344 }
5345
5346 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5347 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5348 /* Fan failure is indicated by SPIO 5 */
5349 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5350 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5351
5352 /* set to active low mode */
5353 val = REG_RD(bp, MISC_REG_SPIO_INT);
5354 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5355 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5356 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5357
5358 /* enable interrupt to signal the IGU */
5359 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5360 val |= (1 << MISC_REGISTERS_SPIO_5);
5361 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5362 break;
f1410647 5363
5364 default:
5365 break;
5366 }
f1410647 5367
5368 /* clear PXP2 attentions */
5369 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5370
34f80b04 5371 enable_blocks_attention(bp);
a2fbb9ea 5372
5373 if (bp->flags & TPA_ENABLE_FLAG) {
5374 struct tstorm_eth_tpa_exist tmp = {0};
5375
5376 tmp.tpa_exist = 1;
5377
5378 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5379 ((u32 *)&tmp)[0]);
5380 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5381 ((u32 *)&tmp)[1]);
5382 }
5383
5384 return 0;
5385}
a2fbb9ea 5386
5387static int bnx2x_init_port(struct bnx2x *bp)
5388{
5389 int port = BP_PORT(bp);
5390 u32 val;
a2fbb9ea 5391
5392 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5393
5394 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5395
5396 /* Port PXP comes here */
5397 /* Port PXP2 comes here */
5398#ifdef BCM_ISCSI
5399 /* Port0 1
5400 * Port1 385 */
5401 i++;
5402 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5403 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5404 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5406
5407 /* Port0 2
5408 * Port1 386 */
5409 i++;
5410 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5411 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5412 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5414
5415 /* Port0 3
5416 * Port1 387 */
5417 i++;
5418 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5419 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5420 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5421 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5422#endif
34f80b04 5423 /* Port CMs come here */
5424
5425 /* Port QM comes here */
5426#ifdef BCM_ISCSI
5427 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5428 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5429
5430 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5431 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5432#endif
5433 /* Port DQ comes here */
5434 /* Port BRB1 comes here */
ad8d3948 5435 /* Port PRS comes here */
5436 /* Port TSDM comes here */
5437 /* Port CSDM comes here */
5438 /* Port USDM comes here */
5439 /* Port XSDM comes here */
5440 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5441 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5442 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5443 port ? USEM_PORT1_END : USEM_PORT0_END);
5444 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5445 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5446 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5447 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5448 /* Port UPB comes here */
5449 /* Port XPB comes here */
5450
5451 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5452 port ? PBF_PORT1_END : PBF_PORT0_END);
5453
5454 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5455 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5456
5457 	/* update threshold (in 16-byte units: 9040/16 = 565, one 9000-byte MTU frame plus margin) */
34f80b04 5458 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5459 /* update init credit */
34f80b04 5460 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5461
5462 /* probe changes */
34f80b04 5463 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5464 msleep(5);
34f80b04 5465 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5466
5467#ifdef BCM_ISCSI
5468 /* tell the searcher where the T2 table is */
5469 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5470
5471 wb_write[0] = U64_LO(bp->t2_mapping);
5472 wb_write[1] = U64_HI(bp->t2_mapping);
5473 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5474 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5475 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5476 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5477
5478 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5479 /* Port SRCH comes here */
5480#endif
5481 /* Port CDU comes here */
5482 /* Port CFC comes here */
5483
5484 if (CHIP_IS_E1(bp)) {
5485 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5486 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5487 }
5488 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5489 port ? HC_PORT1_END : HC_PORT0_END);
5490
5491 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5492 MISC_AEU_PORT0_START,
5493 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5494 /* init aeu_mask_attn_func_0/1:
5495 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5496 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5497 * bits 4-7 are used for "per vn group attention" */
5498 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5499 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5500
5501 /* Port PXPCS comes here */
5502 /* Port EMAC0 comes here */
5503 /* Port EMAC1 comes here */
5504 /* Port DBU comes here */
5505 /* Port DBG comes here */
5506 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5507 port ? NIG_PORT1_END : NIG_PORT0_END);
5508
5509 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5510
5511 if (CHIP_IS_E1H(bp)) {
5512 u32 wsum;
5513 struct cmng_struct_per_port m_cmng_port;
5514 int vn;
5515
5516 /* 0x2 disable e1hov, 0x1 enable */
5517 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5518 (IS_E1HMF(bp) ? 0x1 : 0x2));
5519
5520 /* Init RATE SHAPING and FAIRNESS contexts.
5521 Initialize as if there is 10G link. */
5522 wsum = bnx2x_calc_vn_wsum(bp);
5523 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5524 if (IS_E1HMF(bp))
5525 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5526 bnx2x_init_vn_minmax(bp, 2*vn + port,
5527 wsum, 10000, &m_cmng_port);
5528 }
5529
5530 /* Port MCP comes here */
5531 /* Port DMAE comes here */
5532
34f80b04 5533 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5534 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5535 /* add SPIO 5 to group 0 */
5536 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5537 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5538 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5539 break;
5540
5541 default:
5542 break;
5543 }
5544
c18487ee 5545 bnx2x__link_reset(bp);
a2fbb9ea 5546
5547 return 0;
5548}
5549
5550#define ILT_PER_FUNC (768/2)
5551#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5552/* the phys address is shifted right 12 bits and a 1=valid bit is
5553 added at the 53rd bit;
5554 then since this is a wide register(TM)
5555 we split it into two 32 bit writes
5556 */
5557#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5558#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5559#define PXP_ONE_ILT(x) (((x) << 10) | x)
5560#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5561
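/* worked example with a hypothetical address, for illustration only:
   for addr = 0x456789000, ONCHIP_ADDR1(addr) = 0x00456789 (addr >> 12)
   and ONCHIP_ADDR2(addr) = 0x00100000 - the valid bit is bit 20 of the
   high word, i.e. bit 52 (the 53rd bit) of the 64 bit entry, and
   addr >> 44 is 0 here; likewise PXP_ONE_ILT(5) packs
   first == last == 5 as (5 << 10) | 5 = 0x1405 */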
5562#define CNIC_ILT_LINES 0
5563
5564static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5565{
5566 int reg;
5567
5568 if (CHIP_IS_E1H(bp))
5569 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5570 else /* E1 */
5571 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5572
5573 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5574}
5575
5576static int bnx2x_init_func(struct bnx2x *bp)
5577{
5578 int port = BP_PORT(bp);
5579 int func = BP_FUNC(bp);
5580 int i;
5581
5582 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5583
5584 i = FUNC_ILT_BASE(func);
5585
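	/* the function's first ILT line maps the slowpath context page;
	   with CNIC_ILT_LINES == 0 the first and last CDU ILT lines
	   below coincide */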
5586 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5587 if (CHIP_IS_E1H(bp)) {
5588 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5589 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5590 } else /* E1 */
5591 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5592 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5593
5594
5595 if (CHIP_IS_E1H(bp)) {
5596 for (i = 0; i < 9; i++)
5597 bnx2x_init_block(bp,
5598 cm_start[func][i], cm_end[func][i]);
5599
5600 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5601 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5602 }
5603
5604 /* HC init per function */
5605 if (CHIP_IS_E1H(bp)) {
5606 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5607
5608 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5609 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5610 }
5611 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5612
5613 if (CHIP_IS_E1H(bp))
5614 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5615
c14423fe 5616 /* Reset PCIE errors for debug */
5617 REG_WR(bp, 0x2114, 0xffffffff);
5618 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5619
5620 return 0;
5621}
5622
5623static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5624{
5625 int i, rc = 0;
a2fbb9ea 5626
5627 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5628 BP_FUNC(bp), load_code);
a2fbb9ea 5629
5630 bp->dmae_ready = 0;
5631 mutex_init(&bp->dmae_mutex);
5632 bnx2x_gunzip_init(bp);
a2fbb9ea 5633
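	/* the cases below intentionally fall through: a COMMON load also
	   runs the PORT and FUNCTION stages, a PORT load also runs the
	   FUNCTION stage */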
5634 switch (load_code) {
5635 case FW_MSG_CODE_DRV_LOAD_COMMON:
5636 rc = bnx2x_init_common(bp);
5637 if (rc)
5638 goto init_hw_err;
5639 /* no break */
5640
5641 case FW_MSG_CODE_DRV_LOAD_PORT:
5642 bp->dmae_ready = 1;
5643 rc = bnx2x_init_port(bp);
5644 if (rc)
5645 goto init_hw_err;
5646 /* no break */
5647
5648 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5649 bp->dmae_ready = 1;
5650 rc = bnx2x_init_func(bp);
5651 if (rc)
5652 goto init_hw_err;
5653 break;
5654
5655 default:
5656 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5657 break;
5658 }
5659
5660 if (!BP_NOMCP(bp)) {
5661 int func = BP_FUNC(bp);
5662
5663 bp->fw_drv_pulse_wr_seq =
34f80b04 5664 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5665 DRV_PULSE_SEQ_MASK);
5666 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5667 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5668 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5669 } else
5670 bp->func_stx = 0;
a2fbb9ea 5671
5672 /* this needs to be done before gunzip end */
5673 bnx2x_zero_def_sb(bp);
5674 for_each_queue(bp, i)
5675 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5676
5677init_hw_err:
5678 bnx2x_gunzip_end(bp);
5679
5680 return rc;
5681}
5682
c14423fe 5683/* send the MCP a request, block until there is a reply */
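/* returns the FW_MSG_CODE_* field of the reply, or 0 if the FW never
   echoed our sequence number back */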
5684static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5685{
34f80b04 5686 int func = BP_FUNC(bp);
5687 u32 seq = ++bp->fw_seq;
5688 u32 rc = 0;
5689 u32 cnt = 1;
5690 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5691
34f80b04 5692 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5694
5695 do {
5696 		/* let the FW do its magic ... */
5697 msleep(delay);
a2fbb9ea 5698
19680c48 5699 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5700
5701 		/* Give the FW up to 2 seconds (200 * 10ms) */
5702 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5703
5704 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5705 cnt*delay, rc, seq);
5706
5707 /* is this a reply to our command? */
5708 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5709 rc &= FW_MSG_CODE_MASK;
f1410647 5710
5711 } else {
5712 /* FW BUG! */
5713 BNX2X_ERR("FW failed to respond!\n");
5714 bnx2x_fw_dump(bp);
5715 rc = 0;
5716 }
f1410647 5717
5718 return rc;
5719}
5720
5721static void bnx2x_free_mem(struct bnx2x *bp)
5722{
5723
5724#define BNX2X_PCI_FREE(x, y, size) \
5725 do { \
5726 if (x) { \
5727 pci_free_consistent(bp->pdev, size, x, y); \
5728 x = NULL; \
5729 y = 0; \
5730 } \
5731 } while (0)
5732
5733#define BNX2X_FREE(x) \
5734 do { \
5735 if (x) { \
5736 vfree(x); \
5737 x = NULL; \
5738 } \
5739 } while (0)
5740
5741 int i;
5742
5743 /* fastpath */
5744 for_each_queue(bp, i) {
5745
5746 /* Status blocks */
5747 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5748 bnx2x_fp(bp, i, status_blk_mapping),
5749 sizeof(struct host_status_block) +
5750 sizeof(struct eth_tx_db_data));
5751
5752 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5753 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5754 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5755 bnx2x_fp(bp, i, tx_desc_mapping),
5756 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5757
5758 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5760 bnx2x_fp(bp, i, rx_desc_mapping),
5761 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5762
5763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5764 bnx2x_fp(bp, i, rx_comp_mapping),
5765 sizeof(struct eth_fast_path_rx_cqe) *
5766 NUM_RCQ_BD);
a2fbb9ea 5767
5768 /* SGE ring */
5769 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5770 bnx2x_fp(bp, i, rx_sge_mapping),
5771 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5772 }
5773 /* end of fastpath */
5774
5775 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5776 sizeof(struct host_def_status_block));
5777
5778 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5779 sizeof(struct bnx2x_slowpath));
5780
5781#ifdef BCM_ISCSI
5782 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5783 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5784 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5785 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5786#endif
7a9b2557 5787 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5788
5789#undef BNX2X_PCI_FREE
5790#undef BNX2X_FREE
5791}
5792
5793static int bnx2x_alloc_mem(struct bnx2x *bp)
5794{
5795
5796#define BNX2X_PCI_ALLOC(x, y, size) \
5797 do { \
5798 x = pci_alloc_consistent(bp->pdev, size, y); \
5799 if (x == NULL) \
5800 goto alloc_mem_err; \
5801 memset(x, 0, size); \
5802 } while (0)
5803
5804#define BNX2X_ALLOC(x, size) \
5805 do { \
5806 x = vmalloc(size); \
5807 if (x == NULL) \
5808 goto alloc_mem_err; \
5809 memset(x, 0, size); \
5810 } while (0)
5811
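	/* note: on any allocation failure the macros above jump to
	   alloc_mem_err, which calls bnx2x_free_mem(); its free macros
	   skip NULL entries, so a partially completed pass is unwound
	   safely */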
5812 int i;
5813
5814 /* fastpath */
5815 for_each_queue(bp, i) {
5816 bnx2x_fp(bp, i, bp) = bp;
5817
5818 /* Status blocks */
5819 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5820 &bnx2x_fp(bp, i, status_blk_mapping),
5821 sizeof(struct host_status_block) +
5822 sizeof(struct eth_tx_db_data));
5823
5824 bnx2x_fp(bp, i, hw_tx_prods) =
5825 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5826
5827 bnx2x_fp(bp, i, tx_prods_mapping) =
5828 bnx2x_fp(bp, i, status_blk_mapping) +
5829 sizeof(struct host_status_block);
5830
5831 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5832 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5833 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5834 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5835 &bnx2x_fp(bp, i, tx_desc_mapping),
5836 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5837
5838 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5839 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5841 &bnx2x_fp(bp, i, rx_desc_mapping),
5842 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5843
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5845 &bnx2x_fp(bp, i, rx_comp_mapping),
5846 sizeof(struct eth_fast_path_rx_cqe) *
5847 NUM_RCQ_BD);
5848
5849 /* SGE ring */
5850 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5851 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5852 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5853 &bnx2x_fp(bp, i, rx_sge_mapping),
5854 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5855 }
5856 /* end of fastpath */
5857
5858 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5859 sizeof(struct host_def_status_block));
5860
5861 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5862 sizeof(struct bnx2x_slowpath));
5863
5864#ifdef BCM_ISCSI
5865 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5866
5867 /* Initialize T1 */
5868 for (i = 0; i < 64*1024; i += 64) {
5869 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5870 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5871 }
5872
5873 /* allocate searcher T2 table
5874 we allocate 1/4 of alloc num for T2
5875 (which is not entered into the ILT) */
5876 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5877
5878 /* Initialize T2 */
5879 for (i = 0; i < 16*1024; i += 64)
5880 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5881
c14423fe 5882 /* now fixup the last line in the block to point to the next block */
5883 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5884
5885 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5886 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5887
5888 /* QM queues (128*MAX_CONN) */
5889 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5890#endif
5891
5892 /* Slow path ring */
5893 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5894
5895 return 0;
5896
5897alloc_mem_err:
5898 bnx2x_free_mem(bp);
5899 return -ENOMEM;
5900
5901#undef BNX2X_PCI_ALLOC
5902#undef BNX2X_ALLOC
5903}
5904
5905static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5906{
5907 int i;
5908
5909 for_each_queue(bp, i) {
5910 struct bnx2x_fastpath *fp = &bp->fp[i];
5911
5912 u16 bd_cons = fp->tx_bd_cons;
5913 u16 sw_prod = fp->tx_pkt_prod;
5914 u16 sw_cons = fp->tx_pkt_cons;
5915
5916 while (sw_cons != sw_prod) {
5917 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5918 sw_cons++;
5919 }
5920 }
5921}
5922
5923static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5924{
5925 int i, j;
5926
5927 for_each_queue(bp, j) {
5928 struct bnx2x_fastpath *fp = &bp->fp[j];
5929
5930 for (i = 0; i < NUM_RX_BD; i++) {
5931 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5932 struct sk_buff *skb = rx_buf->skb;
5933
5934 if (skb == NULL)
5935 continue;
5936
5937 pci_unmap_single(bp->pdev,
5938 pci_unmap_addr(rx_buf, mapping),
5939 bp->rx_buf_use_size,
5940 PCI_DMA_FROMDEVICE);
5941
5942 rx_buf->skb = NULL;
5943 dev_kfree_skb(skb);
5944 }
5945 if (!fp->disable_tpa)
5946 bnx2x_free_tpa_pool(bp, fp,
5947 ETH_MAX_AGGREGATION_QUEUES_E1H);
5948 }
5949}
5950
5951static void bnx2x_free_skbs(struct bnx2x *bp)
5952{
5953 bnx2x_free_tx_skbs(bp);
5954 bnx2x_free_rx_skbs(bp);
5955}
5956
5957static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5958{
34f80b04 5959 int i, offset = 1;
5960
5961 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5962 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5963 bp->msix_table[0].vector);
5964
5965 for_each_queue(bp, i) {
c14423fe 5966 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5967 "state %x\n", i, bp->msix_table[i + offset].vector,
5968 bnx2x_fp(bp, i, state));
5969
5970 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5971 BNX2X_ERR("IRQ of fp #%d being freed while "
5972 "state != closed\n", i);
a2fbb9ea 5973
34f80b04 5974 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5975 }
5976}
5977
5978static void bnx2x_free_irq(struct bnx2x *bp)
5979{
a2fbb9ea 5980 if (bp->flags & USING_MSIX_FLAG) {
5981 bnx2x_free_msix_irqs(bp);
5982 pci_disable_msix(bp->pdev);
5983 bp->flags &= ~USING_MSIX_FLAG;
5984
5985 } else
5986 free_irq(bp->pdev->irq, bp->dev);
5987}
5988
5989static int bnx2x_enable_msix(struct bnx2x *bp)
5990{
34f80b04 5991 int i, rc, offset;
5992
5993 bp->msix_table[0].entry = 0;
5994 offset = 1;
5995 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5996
5997 for_each_queue(bp, i) {
5998 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5999
6000 bp->msix_table[i + offset].entry = igu_vec;
6001 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6002 "(fastpath #%u)\n", i + offset, igu_vec, i);
6003 }
6004
6005 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6006 bp->num_queues + offset);
6007 if (rc) {
6008 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6009 return -1;
6010 }
6011 bp->flags |= USING_MSIX_FLAG;
6012
6013 return 0;
6014}
6015
6016static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6017{
34f80b04 6018 int i, rc, offset = 1;
a2fbb9ea 6019
6020 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6021 bp->dev->name, bp->dev);
6022 if (rc) {
6023 BNX2X_ERR("request sp irq failed\n");
6024 return -EBUSY;
6025 }
6026
6027 for_each_queue(bp, i) {
34f80b04 6028 rc = request_irq(bp->msix_table[i + offset].vector,
6029 bnx2x_msix_fp_int, 0,
6030 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6031 if (rc) {
6032 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6033 i + offset, rc);
6034 bnx2x_free_msix_irqs(bp);
6035 return -EBUSY;
6036 }
6037
6038 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6039 }
6040
6041 return 0;
6042}
6043
6044static int bnx2x_req_irq(struct bnx2x *bp)
6045{
34f80b04 6046 int rc;
a2fbb9ea 6047
6048 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6049 bp->dev->name, bp->dev);
6050 if (!rc)
6051 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6052
6053 return rc;
6054}
6055
6056/*
6057 * Init service functions
6058 */
6059
34f80b04 6060static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6061{
6062 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6063 int port = BP_PORT(bp);
6064
6065 /* CAM allocation
6066 * unicasts 0-31:port0 32-63:port1
6067 * multicast 64-127:port0 128-191:port1
6068 */
6069 config->hdr.length_6b = 2;
6070 config->hdr.offset = port ? 31 : 0;
6071 config->hdr.client_id = BP_CL_ID(bp);
6072 config->hdr.reserved1 = 0;
6073
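	/* the CAM keeps each MAC as three 16-bit words in on-wire byte
	   order; on a little-endian host *(u16 *)&dev_addr[0] reads as
	   (dev_addr[1] << 8) | dev_addr[0], so the swab16() calls below
	   restore (dev_addr[0] << 8) | dev_addr[1] */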
6074 /* primary MAC */
6075 config->config_table[0].cam_entry.msb_mac_addr =
6076 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6077 config->config_table[0].cam_entry.middle_mac_addr =
6078 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6079 config->config_table[0].cam_entry.lsb_mac_addr =
6080 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6081 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6082 config->config_table[0].target_table_entry.flags = 0;
6083 config->config_table[0].target_table_entry.client_id = 0;
6084 config->config_table[0].target_table_entry.vlan_id = 0;
6085
6086 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6087 config->config_table[0].cam_entry.msb_mac_addr,
6088 config->config_table[0].cam_entry.middle_mac_addr,
6089 config->config_table[0].cam_entry.lsb_mac_addr);
6090
6091 /* broadcast */
6092 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6093 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6094 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6095 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6096 config->config_table[1].target_table_entry.flags =
6097 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6098 config->config_table[1].target_table_entry.client_id = 0;
6099 config->config_table[1].target_table_entry.vlan_id = 0;
6100
6101 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6102 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6103 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6104}
6105
6106static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6107{
6108 struct mac_configuration_cmd_e1h *config =
6109 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6110
6111 if (bp->state != BNX2X_STATE_OPEN) {
6112 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6113 return;
6114 }
6115
6116 /* CAM allocation for E1H
6117 * unicasts: by func number
6118 * multicast: 20+FUNC*20, 20 each
6119 */
6120 config->hdr.length_6b = 1;
6121 config->hdr.offset = BP_FUNC(bp);
6122 config->hdr.client_id = BP_CL_ID(bp);
6123 config->hdr.reserved1 = 0;
6124
6125 /* primary MAC */
6126 config->config_table[0].msb_mac_addr =
6127 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6128 config->config_table[0].middle_mac_addr =
6129 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6130 config->config_table[0].lsb_mac_addr =
6131 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6132 config->config_table[0].client_id = BP_L_ID(bp);
6133 config->config_table[0].vlan_id = 0;
6134 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6135 config->config_table[0].flags = BP_PORT(bp);
6136
6137 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6138 config->config_table[0].msb_mac_addr,
6139 config->config_table[0].middle_mac_addr,
6140 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6141
6142 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6143 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6144 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6145}
6146
6147static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6148 int *state_p, int poll)
6149{
6150 /* can take a while if any port is running */
34f80b04 6151 int cnt = 500;
a2fbb9ea 6152
6153 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6154 poll ? "polling" : "waiting", state, idx);
6155
6156 might_sleep();
34f80b04 6157 while (cnt--) {
6158 if (poll) {
6159 bnx2x_rx_int(bp->fp, 10);
6160 /* if index is different from 0
6161 * the reply for some commands will
6162 			 * be on the non-default queue
6163 */
6164 if (idx)
6165 bnx2x_rx_int(&bp->fp[idx], 10);
6166 }
34f80b04 6167 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 6168
49d66772 6169 if (*state_p == state)
6170 return 0;
6171
a2fbb9ea 6172 msleep(1);
6173 }
6174
a2fbb9ea 6175 /* timeout! */
6176 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6177 poll ? "polling" : "waiting", state, idx);
6178#ifdef BNX2X_STOP_ON_ERROR
6179 bnx2x_panic();
6180#endif
a2fbb9ea 6181
49d66772 6182 return -EBUSY;
6183}
6184
6185static int bnx2x_setup_leading(struct bnx2x *bp)
6186{
34f80b04 6187 int rc;
a2fbb9ea 6188
c14423fe 6189 /* reset IGU state */
34f80b04 6190 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6191
6192 /* SETUP ramrod */
6193 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6194
6195 /* Wait for completion */
6196 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6197
34f80b04 6198 return rc;
6199}
6200
6201static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6202{
a2fbb9ea 6203 /* reset IGU state */
34f80b04 6204 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6205
228241eb 6206 /* SETUP ramrod */
6207 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6208 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6209
6210 /* Wait for completion */
6211 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6212 &(bp->fp[index].state), 0);
6213}
6214
6215static int bnx2x_poll(struct napi_struct *napi, int budget);
6216static void bnx2x_set_rx_mode(struct net_device *dev);
6217
6218/* must be called with rtnl_lock */
6219static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6220{
228241eb 6221 u32 load_code;
6222 int i, rc;
6223
6224#ifdef BNX2X_STOP_ON_ERROR
6225 if (unlikely(bp->panic))
6226 return -EPERM;
6227#endif
6228
6229 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6230
6231 /* Send LOAD_REQUEST command to MCP
6232 Returns the type of LOAD command:
6233 if it is the first port to be initialized
6234 common blocks should be initialized, otherwise - not
a2fbb9ea 6235 */
34f80b04 6236 if (!BP_NOMCP(bp)) {
6237 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6238 if (!load_code) {
da5a662a 6239 BNX2X_ERR("MCP response failure, aborting\n");
6240 return -EBUSY;
6241 }
34f80b04 6242 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6243 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6244
a2fbb9ea 6245 } else {
6246 int port = BP_PORT(bp);
6247
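		/* no MCP: emulate its load accounting locally.
		   load_count[0] counts all loads on the chip,
		   load_count[1 + port] loads per port; the first load
		   overall gets COMMON init, the first on a port gets
		   PORT init, anything else FUNCTION-only init */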
6248 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6249 load_count[0], load_count[1], load_count[2]);
6250 load_count[0]++;
da5a662a 6251 load_count[1 + port]++;
6252 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6253 load_count[0], load_count[1], load_count[2]);
6254 if (load_count[0] == 1)
6255 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6256 else if (load_count[1 + port] == 1)
6257 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6258 else
6259 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6260 }
6261
6262 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6263 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6264 bp->port.pmf = 1;
6265 else
6266 bp->port.pmf = 0;
6267 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6268
6269 /* if we can't use MSI-X we only need one fp,
6270 * so try to enable MSI-X with the requested number of fp's
6271 * and fallback to inta with one fp
6272 */
6273 if (use_inta) {
6274 bp->num_queues = 1;
6275
6276 } else {
6277 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6278 /* user requested number */
6279 bp->num_queues = use_multi;
6280
6281 else if (use_multi)
6282 bp->num_queues = min_t(u32, num_online_cpus(),
6283 BP_MAX_QUEUES(bp));
6284 else
a2fbb9ea 6285 bp->num_queues = 1;
6286
6287 if (bnx2x_enable_msix(bp)) {
6288 /* failed to enable MSI-X */
6289 bp->num_queues = 1;
6290 if (use_multi)
6291 BNX2X_ERR("Multi requested but failed"
6292 " to enable MSI-X\n");
6293 }
6294 }
6295 DP(NETIF_MSG_IFUP,
6296 "set number of queues to %d\n", bp->num_queues);
c14423fe 6297
6298 if (bnx2x_alloc_mem(bp))
6299 return -ENOMEM;
6300
6301 for_each_queue(bp, i)
6302 bnx2x_fp(bp, i, disable_tpa) =
6303 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6304
6305 if (bp->flags & USING_MSIX_FLAG) {
6306 rc = bnx2x_req_msix_irqs(bp);
6307 if (rc) {
6308 pci_disable_msix(bp->pdev);
6309 goto load_error;
6310 }
6311 } else {
6312 bnx2x_ack_int(bp);
6313 rc = bnx2x_req_irq(bp);
6314 if (rc) {
6315 BNX2X_ERR("IRQ request failed, aborting\n");
6316 goto load_error;
6317 }
6318 }
6319
6320 for_each_queue(bp, i)
6321 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6322 bnx2x_poll, 128);
6323
a2fbb9ea 6324 /* Initialize HW */
6325 rc = bnx2x_init_hw(bp, load_code);
6326 if (rc) {
a2fbb9ea 6327 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6328 goto load_error;
6329 }
6330
a2fbb9ea 6331 /* Setup NIC internals and enable interrupts */
471de716 6332 bnx2x_nic_init(bp, load_code);
6333
6334 /* Send LOAD_DONE command to MCP */
34f80b04 6335 if (!BP_NOMCP(bp)) {
6336 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6337 if (!load_code) {
da5a662a 6338 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6339 rc = -EBUSY;
228241eb 6340 goto load_int_disable;
6341 }
6342 }
6343
6344 bnx2x_stats_init(bp);
6345
6346 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6347
6348 /* Enable Rx interrupt handling before sending the ramrod
6349 as it's completed on Rx FP queue */
6350 for_each_queue(bp, i)
6351 napi_enable(&bnx2x_fp(bp, i, napi));
6352
6353 /* Enable interrupt handling */
6354 atomic_set(&bp->intr_sem, 0);
6355
6356 rc = bnx2x_setup_leading(bp);
6357 if (rc) {
da5a662a 6358 BNX2X_ERR("Setup leading failed!\n");
228241eb 6359 goto load_stop_netif;
34f80b04 6360 }
a2fbb9ea 6361
6362 if (CHIP_IS_E1H(bp))
6363 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6364 BNX2X_ERR("!!! mf_cfg function disabled\n");
6365 bp->state = BNX2X_STATE_DISABLED;
6366 }
a2fbb9ea 6367
6368 if (bp->state == BNX2X_STATE_OPEN)
6369 for_each_nondefault_queue(bp, i) {
6370 rc = bnx2x_setup_multi(bp, i);
6371 if (rc)
6372 goto load_stop_netif;
6373 }
a2fbb9ea 6374
6375 if (CHIP_IS_E1(bp))
6376 bnx2x_set_mac_addr_e1(bp);
6377 else
6378 bnx2x_set_mac_addr_e1h(bp);
6379
6380 if (bp->port.pmf)
6381 bnx2x_initial_phy_init(bp);
6382
6383 /* Start fast path */
6384 switch (load_mode) {
6385 case LOAD_NORMAL:
6386 /* Tx queue should be only reenabled */
6387 netif_wake_queue(bp->dev);
6388 bnx2x_set_rx_mode(bp->dev);
6389 break;
6390
6391 case LOAD_OPEN:
a2fbb9ea 6392 netif_start_queue(bp->dev);
34f80b04 6393 bnx2x_set_rx_mode(bp->dev);
6394 if (bp->flags & USING_MSIX_FLAG)
6395 printk(KERN_INFO PFX "%s: using MSI-X\n",
6396 bp->dev->name);
34f80b04 6397 break;
a2fbb9ea 6398
34f80b04 6399 case LOAD_DIAG:
a2fbb9ea 6400 bnx2x_set_rx_mode(bp->dev);
6401 bp->state = BNX2X_STATE_DIAG;
6402 break;
6403
6404 default:
6405 break;
6406 }
6407
6408 if (!bp->port.pmf)
6409 bnx2x__link_status_update(bp);
6410
6411 /* start the timer */
6412 mod_timer(&bp->timer, jiffies + bp->current_interval);
6413
34f80b04 6414
6415 return 0;
6416
228241eb 6417load_stop_netif:
6418 for_each_queue(bp, i)
6419 napi_disable(&bnx2x_fp(bp, i, napi));
6420
228241eb 6421load_int_disable:
615f8fd9 6422 bnx2x_int_disable_sync(bp);
a2fbb9ea 6423
34f80b04 6424 /* Release IRQs */
6425 bnx2x_free_irq(bp);
6426
6427 /* Free SKBs, SGEs, TPA pool and driver internals */
6428 bnx2x_free_skbs(bp);
6429 for_each_queue(bp, i)
6430 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6431 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6432load_error:
6433 bnx2x_free_mem(bp);
6434
6435 /* TBD we really need to reset the chip
6436 if we want to recover from this */
34f80b04 6437 return rc;
6438}
6439
6440static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6441{
6442 int rc;
6443
c14423fe 6444 /* halt the connection */
6445 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6446 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6447
34f80b04 6448 /* Wait for completion */
a2fbb9ea 6449 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6450 &(bp->fp[index].state), 1);
c14423fe 6451 if (rc) /* timeout */
6452 return rc;
6453
6454 /* delete cfc entry */
6455 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6456
6457 /* Wait for completion */
6458 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6459 &(bp->fp[index].state), 1);
6460 return rc;
6461}
6462
da5a662a 6463static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6464{
49d66772 6465 u16 dsb_sp_prod_idx;
c14423fe 6466 /* if the other port is handling traffic,
a2fbb9ea 6467 this can take a lot of time */
6468 int cnt = 500;
6469 int rc;
6470
6471 might_sleep();
6472
6473 /* Send HALT ramrod */
6474 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6475 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6476
6477 /* Wait for completion */
6478 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6479 &(bp->fp[0].state), 1);
6480 if (rc) /* timeout */
da5a662a 6481 return rc;
a2fbb9ea 6482
49d66772 6483 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6484
228241eb 6485 /* Send PORT_DELETE ramrod */
6486 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6487
49d66772 6488 /* Wait for completion to arrive on default status block
6489 we are going to reset the chip anyway
6490 so there is not much to do if this times out
6491 */
34f80b04 6492 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 6493 msleep(1);
6494 if (!cnt) {
6495 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6496 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6497 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6498#ifdef BNX2X_STOP_ON_ERROR
6499 bnx2x_panic();
6500#else
6501 rc = -EBUSY;
6502#endif
6503 break;
6504 }
6505 cnt--;
da5a662a 6506 msleep(1);
6507 }
6508 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6509 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6510
6511 return rc;
6512}
6513
6514static void bnx2x_reset_func(struct bnx2x *bp)
6515{
6516 int port = BP_PORT(bp);
6517 int func = BP_FUNC(bp);
6518 int base, i;
6519
6520 /* Configure IGU */
6521 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6522 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6523
6524 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6525
6526 /* Clear ILT */
6527 base = FUNC_ILT_BASE(func);
6528 for (i = base; i < base + ILT_PER_FUNC; i++)
6529 bnx2x_ilt_wr(bp, i, 0);
6530}
6531
6532static void bnx2x_reset_port(struct bnx2x *bp)
6533{
6534 int port = BP_PORT(bp);
6535 u32 val;
6536
6537 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6538
6539 /* Do not rcv packets to BRB */
6540 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6541 /* Do not direct rcv packets that are not for MCP to the BRB */
6542 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6543 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6544
6545 /* Configure AEU */
6546 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6547
6548 msleep(100);
6549 /* Check for BRB port occupancy */
6550 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6551 if (val)
6552 DP(NETIF_MSG_IFDOWN,
6553 "BRB1 is not empty %d blooks are occupied\n", val);
6554
6555 /* TODO: Close Doorbell port? */
6556}
6557
6558static void bnx2x_reset_common(struct bnx2x *bp)
6559{
6560 /* reset_common */
6561 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6562 0xd3ffff7f);
6563 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6564}
6565
6566static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6567{
6568 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6569 BP_FUNC(bp), reset_code);
6570
6571 switch (reset_code) {
6572 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6573 bnx2x_reset_port(bp);
6574 bnx2x_reset_func(bp);
6575 bnx2x_reset_common(bp);
6576 break;
6577
6578 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6579 bnx2x_reset_port(bp);
6580 bnx2x_reset_func(bp);
6581 break;
6582
6583 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6584 bnx2x_reset_func(bp);
6585 break;
49d66772 6586
6587 default:
6588 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6589 break;
6590 }
6591}
6592
6593/* must be called with rtnl_lock */
6594static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6595{
da5a662a 6596 int port = BP_PORT(bp);
a2fbb9ea 6597 u32 reset_code = 0;
da5a662a 6598 int i, cnt, rc;
6599
6600 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6601
6602 bp->rx_mode = BNX2X_RX_MODE_NONE;
6603 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6604
6605 if (netif_running(bp->dev)) {
6606 netif_tx_disable(bp->dev);
6607 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6608 }
6609
6610 del_timer_sync(&bp->timer);
6611 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6612 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6613 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6614
da5a662a 6615 /* Wait until tx fast path tasks complete */
6616 for_each_queue(bp, i) {
6617 struct bnx2x_fastpath *fp = &bp->fp[i];
6618
6619 cnt = 1000;
6620 smp_rmb();
6621 while (BNX2X_HAS_TX_WORK(fp)) {
6622
6623 if (!netif_running(bp->dev))
6624 bnx2x_tx_int(fp, 1000);
6625
6626 if (!cnt) {
6627 BNX2X_ERR("timeout waiting for queue[%d]\n",
6628 i);
6629#ifdef BNX2X_STOP_ON_ERROR
6630 bnx2x_panic();
6631 return -EBUSY;
6632#else
6633 break;
6634#endif
6635 }
6636 cnt--;
da5a662a 6637 msleep(1);
6638 smp_rmb();
6639 }
228241eb 6640 }
a2fbb9ea 6641
6642 /* Give HW time to discard old tx messages */
6643 msleep(1);
a2fbb9ea 6644
6645 for_each_queue(bp, i)
6646 napi_disable(&bnx2x_fp(bp, i, napi));
6647 /* Disable interrupts after Tx and Rx are disabled on stack level */
6648 bnx2x_int_disable_sync(bp);
a2fbb9ea 6649
6650 /* Release IRQs */
6651 bnx2x_free_irq(bp);
6652
6653 if (unload_mode == UNLOAD_NORMAL)
6654 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6655
6656 else if (bp->flags & NO_WOL_FLAG) {
a2fbb9ea 6657 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6658 if (CHIP_IS_E1H(bp))
6659 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
228241eb 6660
6661 } else if (bp->wol) {
6662 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6663 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6664 u32 val;
6665 /* The mac address is written to entries 1-4 to
6666 preserve entry 0 which is used by the PMF */
6667 u8 entry = (BP_E1HVN(bp) + 1)*8;
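		/* each MAC_MATCH entry is 8 bytes (two u32 registers), so
		   VN n lands at byte offset (n + 1)*8 - entries 1-4 as
		   noted above - written as two halves at entry and
		   entry + 4 below */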
6668
a2fbb9ea 6669 val = (mac_addr[0] << 8) | mac_addr[1];
da5a662a 6670 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6671
6672 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6673 (mac_addr[4] << 8) | mac_addr[5];
da5a662a 6674 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6675
6676 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6677
6678 } else
6679 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6680
6681 if (CHIP_IS_E1H(bp))
6682 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6683
6684 /* Close multi and leading connections
6685 Completions for ramrods are collected in a synchronous way */
6686 for_each_nondefault_queue(bp, i)
6687 if (bnx2x_stop_multi(bp, i))
228241eb 6688 goto unload_error;
a2fbb9ea 6689
6690 rc = bnx2x_stop_leading(bp);
6691 if (rc) {
34f80b04 6692 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6693#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6694 return -EBUSY;
6695#else
6696 goto unload_error;
34f80b04 6697#endif
6698 }
6699
6700unload_error:
34f80b04 6701 if (!BP_NOMCP(bp))
228241eb 6702 reset_code = bnx2x_fw_command(bp, reset_code);
6703 else {
6704 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6705 load_count[0], load_count[1], load_count[2]);
6706 load_count[0]--;
da5a662a 6707 load_count[1 + port]--;
6708 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6709 load_count[0], load_count[1], load_count[2]);
6710 if (load_count[0] == 0)
6711 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6712 else if (load_count[1 + port] == 0)
6713 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6714 else
6715 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6716 }
a2fbb9ea 6717
6718 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6719 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6720 bnx2x__link_reset(bp);
6721
6722 /* Reset the chip */
228241eb 6723 bnx2x_reset_chip(bp, reset_code);
6724
6725 /* Report UNLOAD_DONE to MCP */
34f80b04 6726 if (!BP_NOMCP(bp))
6727 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6728
7a9b2557 6729 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6730 bnx2x_free_skbs(bp);
6731 for_each_queue(bp, i)
6732 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6733 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6734 bnx2x_free_mem(bp);
6735
6736 bp->state = BNX2X_STATE_CLOSED;
228241eb 6737
6738 netif_carrier_off(bp->dev);
6739
6740 return 0;
6741}
6742
6743static void bnx2x_reset_task(struct work_struct *work)
6744{
6745 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6746
6747#ifdef BNX2X_STOP_ON_ERROR
6748 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6749 " so reset not done to allow debug dump,\n"
6750 KERN_ERR " you will need to reboot when done\n");
6751 return;
6752#endif
6753
6754 rtnl_lock();
6755
6756 if (!netif_running(bp->dev))
6757 goto reset_task_exit;
6758
6759 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6760 bnx2x_nic_load(bp, LOAD_NORMAL);
6761
6762reset_task_exit:
6763 rtnl_unlock();
6764}
6765
6766/* end of nic load/unload */
6767
6768/* ethtool_ops */
6769
6770/*
6771 * Init service functions
6772 */
6773
6774static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6775{
6776 u32 val;
6777
6778 /* Check if there is any driver already loaded */
6779 val = REG_RD(bp, MISC_REG_UNPREPARED);
6780 if (val == 0x1) {
6781 /* Check if it is the UNDI driver
6782 * UNDI driver initializes CID offset for normal bell to 0x7
6783 */
4a37fb66 6784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6785 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6786 if (val == 0x7) {
6787 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6788 /* save our func */
34f80b04 6789 int func = BP_FUNC(bp);
6790 u32 swap_en;
6791 u32 swap_val;
6792
6793 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6794
6795 /* try unload UNDI on port 0 */
6796 bp->func = 0;
6797 bp->fw_seq =
6798 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6799 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6800 reset_code = bnx2x_fw_command(bp, reset_code);
6801
6802 /* if UNDI is loaded on the other port */
6803 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6804
6805 /* send "DONE" for previous unload */
6806 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6807
6808 /* unload UNDI on port 1 */
34f80b04 6809 bp->func = 1;
6810 bp->fw_seq =
6811 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6812 DRV_MSG_SEQ_NUMBER_MASK);
6813 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6814
6815 bnx2x_fw_command(bp, reset_code);
6816 }
6817
6818 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6819 HC_REG_CONFIG_0), 0x1000);
6820
6821 /* close input traffic and wait for it */
6822 /* Do not rcv packets to BRB */
6823 REG_WR(bp,
6824 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6825 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6826 /* Do not direct rcv packets that are not for MCP to
6827 * the BRB */
6828 REG_WR(bp,
6829 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6830 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6831 /* clear AEU */
6832 REG_WR(bp,
6833 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6834 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6835 msleep(10);
6836
6837 /* save NIG port swap info */
6838 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6839 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6840 /* reset device */
6841 REG_WR(bp,
6842 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6843 0xd3ffffff);
34f80b04
EG
6844 REG_WR(bp,
6845 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6846 0x1403);
da5a662a
VZ
6847 /* take the NIG out of reset and restore swap values */
6848 REG_WR(bp,
6849 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6850 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6851 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6852 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6853
6854 /* send unload done to the MCP */
6855 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6856
6857 /* restore our func and fw_seq */
6858 bp->func = func;
6859 bp->fw_seq =
6860 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6861 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6862 }
4a37fb66 6863 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6864 }
6865}
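
/* The function above fingerprints a lingering UNDI (pre-boot) driver by its
 * doorbell signature and tears it down under the UNDI hardware lock. Below is
 * a minimal sketch of just the detect-under-lock step, as a hypothetical
 * helper built from the same calls used in this file: */

static int undi_is_active(struct bnx2x *bp)
{
	int active;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	/* UNDI leaves the normal doorbell CID offset at 0x7 */
	active = (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

	return active;
}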
6866
6867static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6868{
6869 u32 val, val2, val3, val4, id;
6870
6871 /* Get the chip revision id and number. */
6872 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6873 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6874 id = ((val & 0xffff) << 16);
6875 val = REG_RD(bp, MISC_REG_CHIP_REV);
6876 id |= ((val & 0xf) << 12);
6877 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6878 id |= ((val & 0xff) << 4);
6879	val = REG_RD(bp, MISC_REG_BOND_ID);
6880 id |= (val & 0xf);
6881 bp->common.chip_id = id;
6882 bp->link_params.chip_id = bp->common.chip_id;
6883 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6884
6885 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6886 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6887 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6888 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6889 bp->common.flash_size, bp->common.flash_size);
6890
6891 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6892 bp->link_params.shmem_base = bp->common.shmem_base;
6893 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6894
6895 if (!bp->common.shmem_base ||
6896 (bp->common.shmem_base < 0xA0000) ||
6897 (bp->common.shmem_base >= 0xC0000)) {
6898 BNX2X_DEV_INFO("MCP not active\n");
6899 bp->flags |= NO_MCP_FLAG;
6900 return;
6901 }
6902
6903 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6904 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6905 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6906 BNX2X_ERR("BAD MCP validity signature\n");
6907
6908 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6909 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6910
6911 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6912 bp->common.hw_config, bp->common.board);
6913
6914 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6915 SHARED_HW_CFG_LED_MODE_MASK) >>
6916 SHARED_HW_CFG_LED_MODE_SHIFT);
6917
6918 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6919 bp->common.bc_ver = val;
6920 BNX2X_DEV_INFO("bc_ver %X\n", val);
6921 if (val < BNX2X_BC_VER) {
6922 /* for now only warn
6923 * later we might need to enforce this */
6924 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6925 " please upgrade BC\n", BNX2X_BC_VER, val);
6926 }
6927 BNX2X_DEV_INFO("%sWoL Capable\n",
6928		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6929
6930 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6931 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6932 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6933 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6934
6935 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6936 val, val2, val3, val4);
6937}
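
/* As the bit-layout comment above says, chip_id packs the chip number in
 * bits 16-31, rev in 12-15, metal in 4-11 and bond_id in 0-3. A standalone
 * sketch of the reverse decode (hypothetical helper, plain C): */

#include <stdint.h>
#include <stdio.h>

static void decode_chip_id(uint32_t id)
{
	printf("num 0x%04x rev 0x%x metal 0x%02x bond 0x%x\n",
	       (id >> 16) & 0xffff,	/* chip num: bits 16-31 */
	       (id >> 12) & 0xf,	/* rev:      bits 12-15 */
	       (id >> 4) & 0xff,	/* metal:    bits 4-11  */
	       id & 0xf);		/* bond_id:  bits 0-3   */
}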
6938
6939static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6940 u32 switch_cfg)
a2fbb9ea 6941{
34f80b04 6942 int port = BP_PORT(bp);
a2fbb9ea
ET
6943 u32 ext_phy_type;
6944
a2fbb9ea
ET
6945 switch (switch_cfg) {
6946 case SWITCH_CFG_1G:
6947 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6948
c18487ee
YR
6949 ext_phy_type =
6950 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6951 switch (ext_phy_type) {
6952 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6953 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6954 ext_phy_type);
6955
34f80b04
EG
6956 bp->port.supported |= (SUPPORTED_10baseT_Half |
6957 SUPPORTED_10baseT_Full |
6958 SUPPORTED_100baseT_Half |
6959 SUPPORTED_100baseT_Full |
6960 SUPPORTED_1000baseT_Full |
6961 SUPPORTED_2500baseX_Full |
6962 SUPPORTED_TP |
6963 SUPPORTED_FIBRE |
6964 SUPPORTED_Autoneg |
6965 SUPPORTED_Pause |
6966 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6967 break;
6968
6969 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6970 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6971 ext_phy_type);
6972
34f80b04
EG
6973 bp->port.supported |= (SUPPORTED_10baseT_Half |
6974 SUPPORTED_10baseT_Full |
6975 SUPPORTED_100baseT_Half |
6976 SUPPORTED_100baseT_Full |
6977 SUPPORTED_1000baseT_Full |
6978 SUPPORTED_TP |
6979 SUPPORTED_FIBRE |
6980 SUPPORTED_Autoneg |
6981 SUPPORTED_Pause |
6982 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6983 break;
6984
6985 default:
6986 BNX2X_ERR("NVRAM config error. "
6987 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6988 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6989 return;
6990 }
6991
34f80b04
EG
6992 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6993 port*0x10);
6994 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6995 break;
6996
6997 case SWITCH_CFG_10G:
6998 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6999
c18487ee
YR
7000 ext_phy_type =
7001 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7002 switch (ext_phy_type) {
7003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7004 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7005 ext_phy_type);
7006
34f80b04
EG
7007 bp->port.supported |= (SUPPORTED_10baseT_Half |
7008 SUPPORTED_10baseT_Full |
7009 SUPPORTED_100baseT_Half |
7010 SUPPORTED_100baseT_Full |
7011 SUPPORTED_1000baseT_Full |
7012 SUPPORTED_2500baseX_Full |
7013 SUPPORTED_10000baseT_Full |
7014 SUPPORTED_TP |
7015 SUPPORTED_FIBRE |
7016 SUPPORTED_Autoneg |
7017 SUPPORTED_Pause |
7018 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7019 break;
7020
7021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7022 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7023 ext_phy_type);
f1410647 7024
34f80b04
EG
7025 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7026 SUPPORTED_FIBRE |
7027 SUPPORTED_Pause |
7028 SUPPORTED_Asym_Pause);
f1410647
ET
7029 break;
7030
a2fbb9ea 7031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7032 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7033 ext_phy_type);
7034
34f80b04
EG
7035 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7036 SUPPORTED_1000baseT_Full |
7037 SUPPORTED_FIBRE |
7038 SUPPORTED_Pause |
7039 SUPPORTED_Asym_Pause);
f1410647
ET
7040 break;
7041
7042 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7043 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7044 ext_phy_type);
7045
34f80b04
EG
7046 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7047 SUPPORTED_1000baseT_Full |
7048 SUPPORTED_FIBRE |
7049 SUPPORTED_Autoneg |
7050 SUPPORTED_Pause |
7051 SUPPORTED_Asym_Pause);
f1410647
ET
7052 break;
7053
c18487ee
YR
7054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7055 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7056 ext_phy_type);
7057
34f80b04
EG
7058 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7059 SUPPORTED_2500baseX_Full |
7060 SUPPORTED_1000baseT_Full |
7061 SUPPORTED_FIBRE |
7062 SUPPORTED_Autoneg |
7063 SUPPORTED_Pause |
7064 SUPPORTED_Asym_Pause);
c18487ee
YR
7065 break;
7066
f1410647
ET
7067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7068 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7069 ext_phy_type);
7070
34f80b04
EG
7071 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7072 SUPPORTED_TP |
7073 SUPPORTED_Autoneg |
7074 SUPPORTED_Pause |
7075 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7076 break;
7077
c18487ee
YR
7078 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7079 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7080 bp->link_params.ext_phy_config);
7081 break;
7082
a2fbb9ea
ET
7083 default:
7084 BNX2X_ERR("NVRAM config error. "
7085 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7086 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7087 return;
7088 }
7089
34f80b04
EG
7090 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7091 port*0x18);
7092 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7093
a2fbb9ea
ET
7094 break;
7095
7096 default:
7097 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7098 bp->port.link_config);
a2fbb9ea
ET
7099 return;
7100 }
34f80b04 7101 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7102
7103 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7104 if (!(bp->link_params.speed_cap_mask &
7105 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7106 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7107
c18487ee
YR
7108 if (!(bp->link_params.speed_cap_mask &
7109 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7110 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7111
c18487ee
YR
7112 if (!(bp->link_params.speed_cap_mask &
7113 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7114 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7115
c18487ee
YR
7116 if (!(bp->link_params.speed_cap_mask &
7117 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7118 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7119
c18487ee
YR
7120 if (!(bp->link_params.speed_cap_mask &
7121 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7122 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7123 SUPPORTED_1000baseT_Full);
a2fbb9ea 7124
c18487ee
YR
7125 if (!(bp->link_params.speed_cap_mask &
7126 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7127 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7128
c18487ee
YR
7129 if (!(bp->link_params.speed_cap_mask &
7130 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7131 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7132
34f80b04 7133 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7134}
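
/* The tail of the function above applies one simple rule: any speed the
 * NVRAM speed_cap_mask does not grant is stripped from the supported set.
 * A condensed sketch of that pattern for two of the speeds (illustrative
 * only, reusing the same constants): */

static u32 mask_unsupported_speeds(u32 supported, u32 cap_mask)
{
	if (!(cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		supported &= ~(SUPPORTED_1000baseT_Half |
			       SUPPORTED_1000baseT_Full);

	if (!(cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		supported &= ~SUPPORTED_10000baseT_Full;

	return supported;
}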
7135
34f80b04 7136static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7137{
c18487ee 7138 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7139
34f80b04 7140 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7141 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7142 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7143 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7144 bp->port.advertising = bp->port.supported;
a2fbb9ea 7145 } else {
c18487ee
YR
7146 u32 ext_phy_type =
7147 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7148
7149 if ((ext_phy_type ==
7150 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7151 (ext_phy_type ==
7152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7153 /* force 10G, no AN */
c18487ee 7154 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7155 bp->port.advertising =
a2fbb9ea
ET
7156 (ADVERTISED_10000baseT_Full |
7157 ADVERTISED_FIBRE);
7158 break;
7159 }
7160 BNX2X_ERR("NVRAM config error. "
7161 "Invalid link_config 0x%x"
7162 " Autoneg not supported\n",
34f80b04 7163 bp->port.link_config);
a2fbb9ea
ET
7164 return;
7165 }
7166 break;
7167
7168 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7169 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7170 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7171 bp->port.advertising = (ADVERTISED_10baseT_Full |
7172 ADVERTISED_TP);
a2fbb9ea
ET
7173 } else {
7174 BNX2X_ERR("NVRAM config error. "
7175 "Invalid link_config 0x%x"
7176 " speed_cap_mask 0x%x\n",
34f80b04 7177 bp->port.link_config,
c18487ee 7178 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7179 return;
7180 }
7181 break;
7182
7183 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7184 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7185 bp->link_params.req_line_speed = SPEED_10;
7186 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7187 bp->port.advertising = (ADVERTISED_10baseT_Half |
7188 ADVERTISED_TP);
a2fbb9ea
ET
7189 } else {
7190 BNX2X_ERR("NVRAM config error. "
7191 "Invalid link_config 0x%x"
7192 " speed_cap_mask 0x%x\n",
34f80b04 7193 bp->port.link_config,
c18487ee 7194 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7195 return;
7196 }
7197 break;
7198
7199 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7200 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7201 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7202 bp->port.advertising = (ADVERTISED_100baseT_Full |
7203 ADVERTISED_TP);
a2fbb9ea
ET
7204 } else {
7205 BNX2X_ERR("NVRAM config error. "
7206 "Invalid link_config 0x%x"
7207 " speed_cap_mask 0x%x\n",
34f80b04 7208 bp->port.link_config,
c18487ee 7209 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7210 return;
7211 }
7212 break;
7213
7214 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7215 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7216 bp->link_params.req_line_speed = SPEED_100;
7217 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7218 bp->port.advertising = (ADVERTISED_100baseT_Half |
7219 ADVERTISED_TP);
a2fbb9ea
ET
7220 } else {
7221 BNX2X_ERR("NVRAM config error. "
7222 "Invalid link_config 0x%x"
7223 " speed_cap_mask 0x%x\n",
34f80b04 7224 bp->port.link_config,
c18487ee 7225 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7226 return;
7227 }
7228 break;
7229
7230 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7231 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7232 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7233 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7234 ADVERTISED_TP);
a2fbb9ea
ET
7235 } else {
7236 BNX2X_ERR("NVRAM config error. "
7237 "Invalid link_config 0x%x"
7238 " speed_cap_mask 0x%x\n",
34f80b04 7239 bp->port.link_config,
c18487ee 7240 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7241 return;
7242 }
7243 break;
7244
7245 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7246 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7247 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7248 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7249 ADVERTISED_TP);
a2fbb9ea
ET
7250 } else {
7251 BNX2X_ERR("NVRAM config error. "
7252 "Invalid link_config 0x%x"
7253 " speed_cap_mask 0x%x\n",
34f80b04 7254 bp->port.link_config,
c18487ee 7255 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7256 return;
7257 }
7258 break;
7259
7260 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7261 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7262 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7263 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7264 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7265 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7266 ADVERTISED_FIBRE);
a2fbb9ea
ET
7267 } else {
7268 BNX2X_ERR("NVRAM config error. "
7269 "Invalid link_config 0x%x"
7270 " speed_cap_mask 0x%x\n",
34f80b04 7271 bp->port.link_config,
c18487ee 7272 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7273 return;
7274 }
7275 break;
7276
7277 default:
7278 BNX2X_ERR("NVRAM config error. "
7279 "BAD link speed link_config 0x%x\n",
34f80b04 7280 bp->port.link_config);
c18487ee 7281 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7282 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7283 break;
7284 }
a2fbb9ea 7285
34f80b04
EG
7286 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7287 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7288 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7289 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7290 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7291
c18487ee 7292 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7293 " advertising 0x%x\n",
c18487ee
YR
7294 bp->link_params.req_line_speed,
7295 bp->link_params.req_duplex,
34f80b04 7296 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7297}
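
/* The flow-control resolution at the end of the function above has one
 * subtlety: a requested FLOW_CTRL_AUTO is meaningless without autoneg, so it
 * degrades to FLOW_CTRL_NONE. The same rule as a hypothetical helper: */

static u32 resolve_req_flow_ctrl(u32 link_config, u32 supported)
{
	u32 fc = link_config & PORT_FEATURE_FLOW_CONTROL_MASK;

	/* pause cannot be negotiated without autoneg support */
	if ((fc == FLOW_CTRL_AUTO) && !(supported & SUPPORTED_Autoneg))
		fc = FLOW_CTRL_NONE;

	return fc;
}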
7298
34f80b04 7299static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7300{
34f80b04
EG
7301 int port = BP_PORT(bp);
7302 u32 val, val2;
a2fbb9ea 7303
c18487ee 7304 bp->link_params.bp = bp;
34f80b04 7305 bp->link_params.port = port;
c18487ee 7306
c18487ee 7307 bp->link_params.serdes_config =
f1410647 7308 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7309 bp->link_params.lane_config =
a2fbb9ea 7310 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7311 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7312 SHMEM_RD(bp,
7313 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7314 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7315 SHMEM_RD(bp,
7316 dev_info.port_hw_config[port].speed_capability_mask);
7317
34f80b04 7318 bp->port.link_config =
a2fbb9ea
ET
7319 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7320
34f80b04
EG
7321 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7322 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7323 " link_config 0x%08x\n",
c18487ee
YR
7324 bp->link_params.serdes_config,
7325 bp->link_params.lane_config,
7326 bp->link_params.ext_phy_config,
34f80b04 7327 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7328
34f80b04 7329 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7330 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7331 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7332
7333 bnx2x_link_settings_requested(bp);
7334
7335 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7336 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7337 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7338 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7339 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7340 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7341 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7342 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7343 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7344 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7345}
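
/* The MAC assembly above unpacks the station address from two 32-bit
 * shared-memory words: mac_upper carries bytes 0-1 in its low half and
 * mac_lower carries bytes 2-5, so e.g. upper 0x0000001a with lower
 * 0x2b3c4d5e yields 00:1a:2b:3c:4d:5e. Standalone sketch (hypothetical
 * helper): */

#include <stdint.h>

static void mac_from_shmem(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	mac[0] = upper >> 8;	/* byte 0 */
	mac[1] = upper;		/* byte 1 */
	mac[2] = lower >> 24;	/* byte 2 */
	mac[3] = lower >> 16;	/* byte 3 */
	mac[4] = lower >> 8;	/* byte 4 */
	mac[5] = lower;		/* byte 5 */
}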
7346
7347static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7348{
7349 int func = BP_FUNC(bp);
7350 u32 val, val2;
7351 int rc = 0;
a2fbb9ea 7352
34f80b04 7353 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7354
34f80b04
EG
7355 bp->e1hov = 0;
7356 bp->e1hmf = 0;
7357 if (CHIP_IS_E1H(bp)) {
7358 bp->mf_config =
7359 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7360
34f80b04
EG
7361 val =
7362 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7363 FUNC_MF_CFG_E1HOV_TAG_MASK);
7364 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7365
34f80b04
EG
7366 bp->e1hov = val;
7367 bp->e1hmf = 1;
7368 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7369 "(0x%04x)\n",
7370 func, bp->e1hov, bp->e1hov);
7371 } else {
7372 BNX2X_DEV_INFO("Single function mode\n");
7373 if (BP_E1HVN(bp)) {
7374 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7375 " aborting\n", func);
7376 rc = -EPERM;
7377 }
7378 }
7379 }
a2fbb9ea 7380
34f80b04
EG
7381 if (!BP_NOMCP(bp)) {
7382 bnx2x_get_port_hwinfo(bp);
7383
7384 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7385 DRV_MSG_SEQ_NUMBER_MASK);
7386 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7387 }
7388
7389 if (IS_E1HMF(bp)) {
7390 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7391 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7392 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7393 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7394 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7395 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7396 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7397 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7398 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7399 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7400 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7401 ETH_ALEN);
7402 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7403 ETH_ALEN);
a2fbb9ea 7404 }
34f80b04
EG
7405
7406 return rc;
a2fbb9ea
ET
7407 }
7408
34f80b04
EG
7409 if (BP_NOMCP(bp)) {
7410 /* only supposed to happen on emulation/FPGA */
7411		BNX2X_ERR("warning: random MAC workaround active\n");
7412 random_ether_addr(bp->dev->dev_addr);
7413 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7414 }
a2fbb9ea 7415
34f80b04
EG
7416 return rc;
7417}
7418
7419static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7420{
7421 int func = BP_FUNC(bp);
7422 int rc;
7423
da5a662a
VZ
7424 /* Disable interrupt handling until HW is initialized */
7425 atomic_set(&bp->intr_sem, 1);
7426
34f80b04 7427 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7428
34f80b04
EG
7429 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7430 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7431
7432 rc = bnx2x_get_hwinfo(bp);
7433
7434	/* need to reset the chip if UNDI was active */
7435 if (!BP_NOMCP(bp))
7436 bnx2x_undi_unload(bp);
7437
7438 if (CHIP_REV_IS_FPGA(bp))
7439 printk(KERN_ERR PFX "FPGA detected\n");
7440
7441 if (BP_NOMCP(bp) && (func == 0))
7442 printk(KERN_ERR PFX
7443 "MCP disabled, must load devices in order!\n");
7444
7a9b2557
VZ
7445 /* Set TPA flags */
7446 if (disable_tpa) {
7447 bp->flags &= ~TPA_ENABLE_FLAG;
7448 bp->dev->features &= ~NETIF_F_LRO;
7449 } else {
7450 bp->flags |= TPA_ENABLE_FLAG;
7451 bp->dev->features |= NETIF_F_LRO;
7452 }
7453
7454
34f80b04
EG
7455 bp->tx_ring_size = MAX_TX_AVAIL;
7456 bp->rx_ring_size = MAX_RX_AVAIL;
7457
7458 bp->rx_csum = 1;
7459 bp->rx_offset = 0;
7460
7461 bp->tx_ticks = 50;
7462 bp->rx_ticks = 25;
7463
34f80b04
EG
7464 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7465 bp->current_interval = (poll ? poll : bp->timer_interval);
7466
7467 init_timer(&bp->timer);
7468 bp->timer.expires = jiffies + bp->current_interval;
7469 bp->timer.data = (unsigned long) bp;
7470 bp->timer.function = bnx2x_timer;
7471
7472 return rc;
a2fbb9ea
ET
7473}
7474
7475/*
7476 * ethtool service functions
7477 */
7478
7479/* All ethtool functions called with rtnl_lock */
7480
7481static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7482{
7483 struct bnx2x *bp = netdev_priv(dev);
7484
34f80b04
EG
7485 cmd->supported = bp->port.supported;
7486 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7487
7488 if (netif_carrier_ok(dev)) {
c18487ee
YR
7489 cmd->speed = bp->link_vars.line_speed;
7490 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7491 } else {
c18487ee
YR
7492 cmd->speed = bp->link_params.req_line_speed;
7493 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7494 }
34f80b04
EG
7495 if (IS_E1HMF(bp)) {
7496 u16 vn_max_rate;
7497
7498 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7499 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7500 if (vn_max_rate < cmd->speed)
7501 cmd->speed = vn_max_rate;
7502 }
a2fbb9ea 7503
c18487ee
YR
7504 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7505 u32 ext_phy_type =
7506 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7507
7508 switch (ext_phy_type) {
7509 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7510 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7511 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7512 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7513 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7514 cmd->port = PORT_FIBRE;
7515 break;
7516
7517 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7518 cmd->port = PORT_TP;
7519 break;
7520
c18487ee
YR
7521 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7522 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7523 bp->link_params.ext_phy_config);
7524 break;
7525
f1410647
ET
7526 default:
7527 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7528 bp->link_params.ext_phy_config);
7529 break;
f1410647
ET
7530 }
7531 } else
a2fbb9ea 7532 cmd->port = PORT_TP;
a2fbb9ea 7533
34f80b04 7534 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7535 cmd->transceiver = XCVR_INTERNAL;
7536
c18487ee 7537 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7538 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7539 else
a2fbb9ea 7540 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7541
7542 cmd->maxtxpkt = 0;
7543 cmd->maxrxpkt = 0;
7544
7545 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7546 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7547 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7548 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7549 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7550 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7551 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7552
7553 return 0;
7554}
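
/* In the multi-function branch above, the per-function BW field is stored in
 * units of 100 Mbps, so a field value of 25 caps a 10G link's reported speed
 * at 2500. A minimal sketch of the clamp (illustrative helper, same
 * constants): */

static u16 mf_clamp_speed(u16 speed, u32 mf_config)
{
	u16 vn_max_rate = ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			   FUNC_MF_CFG_MAX_BW_SHIFT) * 100;

	return (vn_max_rate < speed) ? vn_max_rate : speed;
}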
7555
7556static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7557{
7558 struct bnx2x *bp = netdev_priv(dev);
7559 u32 advertising;
7560
34f80b04
EG
7561 if (IS_E1HMF(bp))
7562 return 0;
7563
a2fbb9ea
ET
7564 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7565 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7566 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7567 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7568 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7569 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7570 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7571
a2fbb9ea 7572 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7573 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7574 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7575 return -EINVAL;
f1410647 7576 }
a2fbb9ea
ET
7577
7578 /* advertise the requested speed and duplex if supported */
34f80b04 7579 cmd->advertising &= bp->port.supported;
a2fbb9ea 7580
c18487ee
YR
7581 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7582 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7583 bp->port.advertising |= (ADVERTISED_Autoneg |
7584 cmd->advertising);
a2fbb9ea
ET
7585
7586 } else { /* forced speed */
7587 /* advertise the requested speed and duplex if supported */
7588 switch (cmd->speed) {
7589 case SPEED_10:
7590 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7591 if (!(bp->port.supported &
f1410647
ET
7592 SUPPORTED_10baseT_Full)) {
7593 DP(NETIF_MSG_LINK,
7594 "10M full not supported\n");
a2fbb9ea 7595 return -EINVAL;
f1410647 7596 }
a2fbb9ea
ET
7597
7598 advertising = (ADVERTISED_10baseT_Full |
7599 ADVERTISED_TP);
7600 } else {
34f80b04 7601 if (!(bp->port.supported &
f1410647
ET
7602 SUPPORTED_10baseT_Half)) {
7603 DP(NETIF_MSG_LINK,
7604 "10M half not supported\n");
a2fbb9ea 7605 return -EINVAL;
f1410647 7606 }
a2fbb9ea
ET
7607
7608 advertising = (ADVERTISED_10baseT_Half |
7609 ADVERTISED_TP);
7610 }
7611 break;
7612
7613 case SPEED_100:
7614 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7615 if (!(bp->port.supported &
f1410647
ET
7616 SUPPORTED_100baseT_Full)) {
7617 DP(NETIF_MSG_LINK,
7618 "100M full not supported\n");
a2fbb9ea 7619 return -EINVAL;
f1410647 7620 }
a2fbb9ea
ET
7621
7622 advertising = (ADVERTISED_100baseT_Full |
7623 ADVERTISED_TP);
7624 } else {
34f80b04 7625 if (!(bp->port.supported &
f1410647
ET
7626 SUPPORTED_100baseT_Half)) {
7627 DP(NETIF_MSG_LINK,
7628 "100M half not supported\n");
a2fbb9ea 7629 return -EINVAL;
f1410647 7630 }
a2fbb9ea
ET
7631
7632 advertising = (ADVERTISED_100baseT_Half |
7633 ADVERTISED_TP);
7634 }
7635 break;
7636
7637 case SPEED_1000:
f1410647
ET
7638 if (cmd->duplex != DUPLEX_FULL) {
7639 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7640 return -EINVAL;
f1410647 7641 }
a2fbb9ea 7642
34f80b04 7643 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7644 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7645 return -EINVAL;
f1410647 7646 }
a2fbb9ea
ET
7647
7648 advertising = (ADVERTISED_1000baseT_Full |
7649 ADVERTISED_TP);
7650 break;
7651
7652 case SPEED_2500:
f1410647
ET
7653 if (cmd->duplex != DUPLEX_FULL) {
7654 DP(NETIF_MSG_LINK,
7655 "2.5G half not supported\n");
a2fbb9ea 7656 return -EINVAL;
f1410647 7657 }
a2fbb9ea 7658
34f80b04 7659 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7660 DP(NETIF_MSG_LINK,
7661 "2.5G full not supported\n");
a2fbb9ea 7662 return -EINVAL;
f1410647 7663 }
a2fbb9ea 7664
f1410647 7665 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7666 ADVERTISED_TP);
7667 break;
7668
7669 case SPEED_10000:
f1410647
ET
7670 if (cmd->duplex != DUPLEX_FULL) {
7671 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7672 return -EINVAL;
f1410647 7673 }
a2fbb9ea 7674
34f80b04 7675 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7676 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7677 return -EINVAL;
f1410647 7678 }
a2fbb9ea
ET
7679
7680 advertising = (ADVERTISED_10000baseT_Full |
7681 ADVERTISED_FIBRE);
7682 break;
7683
7684 default:
f1410647 7685 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7686 return -EINVAL;
7687 }
7688
c18487ee
YR
7689 bp->link_params.req_line_speed = cmd->speed;
7690 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7691 bp->port.advertising = advertising;
a2fbb9ea
ET
7692 }
7693
c18487ee 7694 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7695 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7696 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7697 bp->port.advertising);
a2fbb9ea 7698
34f80b04 7699 if (netif_running(dev)) {
bb2a0f7a 7700 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7701 bnx2x_link_set(bp);
7702 }
a2fbb9ea
ET
7703
7704 return 0;
7705}
7706
c18487ee
YR
7707#define PHY_FW_VER_LEN 10
7708
a2fbb9ea
ET
7709static void bnx2x_get_drvinfo(struct net_device *dev,
7710 struct ethtool_drvinfo *info)
7711{
7712 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7713 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7714
7715 strcpy(info->driver, DRV_MODULE_NAME);
7716 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7717
7718 phy_fw_ver[0] = '\0';
34f80b04 7719 if (bp->port.pmf) {
4a37fb66 7720 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7721 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7722 (bp->state != BNX2X_STATE_CLOSED),
7723 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7724 bnx2x_release_phy_lock(bp);
34f80b04 7725 }
c18487ee
YR
7726
7727 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7728 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7729 BCM_5710_FW_REVISION_VERSION,
34f80b04 7730 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7731 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
7732 strcpy(info->bus_info, pci_name(bp->pdev));
7733 info->n_stats = BNX2X_NUM_STATS;
7734 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7735 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7736 info->regdump_len = 0;
7737}
7738
7739static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7740{
7741 struct bnx2x *bp = netdev_priv(dev);
7742
7743 if (bp->flags & NO_WOL_FLAG) {
7744 wol->supported = 0;
7745 wol->wolopts = 0;
7746 } else {
7747 wol->supported = WAKE_MAGIC;
7748 if (bp->wol)
7749 wol->wolopts = WAKE_MAGIC;
7750 else
7751 wol->wolopts = 0;
7752 }
7753 memset(&wol->sopass, 0, sizeof(wol->sopass));
7754}
7755
7756static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7757{
7758 struct bnx2x *bp = netdev_priv(dev);
7759
7760 if (wol->wolopts & ~WAKE_MAGIC)
7761 return -EINVAL;
7762
7763 if (wol->wolopts & WAKE_MAGIC) {
7764 if (bp->flags & NO_WOL_FLAG)
7765 return -EINVAL;
7766
7767 bp->wol = 1;
34f80b04 7768 } else
a2fbb9ea 7769 bp->wol = 0;
34f80b04 7770
a2fbb9ea
ET
7771 return 0;
7772}
7773
7774static u32 bnx2x_get_msglevel(struct net_device *dev)
7775{
7776 struct bnx2x *bp = netdev_priv(dev);
7777
7778 return bp->msglevel;
7779}
7780
7781static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7782{
7783 struct bnx2x *bp = netdev_priv(dev);
7784
7785 if (capable(CAP_NET_ADMIN))
7786 bp->msglevel = level;
7787}
7788
7789static int bnx2x_nway_reset(struct net_device *dev)
7790{
7791 struct bnx2x *bp = netdev_priv(dev);
7792
34f80b04
EG
7793 if (!bp->port.pmf)
7794 return 0;
a2fbb9ea 7795
34f80b04 7796 if (netif_running(dev)) {
bb2a0f7a 7797 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7798 bnx2x_link_set(bp);
7799 }
a2fbb9ea
ET
7800
7801 return 0;
7802}
7803
7804static int bnx2x_get_eeprom_len(struct net_device *dev)
7805{
7806 struct bnx2x *bp = netdev_priv(dev);
7807
34f80b04 7808 return bp->common.flash_size;
a2fbb9ea
ET
7809}
7810
7811static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7812{
34f80b04 7813 int port = BP_PORT(bp);
a2fbb9ea
ET
7814 int count, i;
7815 u32 val = 0;
7816
7817 /* adjust timeout for emulation/FPGA */
7818 count = NVRAM_TIMEOUT_COUNT;
7819 if (CHIP_REV_IS_SLOW(bp))
7820 count *= 100;
7821
7822 /* request access to nvram interface */
7823 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7824 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7825
7826 for (i = 0; i < count*10; i++) {
7827 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7828 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7829 break;
7830
7831 udelay(5);
7832 }
7833
7834 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7835 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7836 return -EBUSY;
7837 }
7838
7839 return 0;
7840}
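
/* The acquire/release pair above (and below) share the same handshake with
 * the MCP arbiter: set the per-port request (or clear) bit, then poll in
 * 5 us steps for the grant bit to reach the desired state. The polling idiom,
 * condensed into a hypothetical helper: */

static int poll_arb_bit(struct bnx2x *bp, u32 bit, int set, int count)
{
	int i;

	for (i = 0; i < count*10; i++) {
		u32 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);

		if (!!(val & bit) == !!set)
			return 0;	/* arbiter reached desired state */
		udelay(5);
	}

	return -EBUSY;
}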
7841
7842static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7843{
34f80b04 7844 int port = BP_PORT(bp);
a2fbb9ea
ET
7845 int count, i;
7846 u32 val = 0;
7847
7848 /* adjust timeout for emulation/FPGA */
7849 count = NVRAM_TIMEOUT_COUNT;
7850 if (CHIP_REV_IS_SLOW(bp))
7851 count *= 100;
7852
7853 /* relinquish nvram interface */
7854 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7855 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7856
7857 for (i = 0; i < count*10; i++) {
7858 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7859 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7860 break;
7861
7862 udelay(5);
7863 }
7864
7865 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7866 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7867 return -EBUSY;
7868 }
7869
7870 return 0;
7871}
7872
7873static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7874{
7875 u32 val;
7876
7877 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7878
7879 /* enable both bits, even on read */
7880 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7881 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7882 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7883}
7884
7885static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7886{
7887 u32 val;
7888
7889 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7890
7891 /* disable both bits, even after read */
7892 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7893 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7894 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7895}
7896
7897static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7898 u32 cmd_flags)
7899{
f1410647 7900 int count, i, rc;
a2fbb9ea
ET
7901 u32 val;
7902
7903 /* build the command word */
7904 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7905
7906 /* need to clear DONE bit separately */
7907 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7908
7909 /* address of the NVRAM to read from */
7910 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7911 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7912
7913 /* issue a read command */
7914 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7915
7916 /* adjust timeout for emulation/FPGA */
7917 count = NVRAM_TIMEOUT_COUNT;
7918 if (CHIP_REV_IS_SLOW(bp))
7919 count *= 100;
7920
7921 /* wait for completion */
7922 *ret_val = 0;
7923 rc = -EBUSY;
7924 for (i = 0; i < count; i++) {
7925 udelay(5);
7926 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7927
7928 if (val & MCPR_NVM_COMMAND_DONE) {
7929 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7930			/* we read nvram data in cpu order,
7931			 * but ethtool sees it as an array of bytes;
7932			 * converting to big-endian does the work */
7933 val = cpu_to_be32(val);
7934 *ret_val = val;
7935 rc = 0;
7936 break;
7937 }
7938 }
7939
7940 return rc;
7941}
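
/* The big-endian conversion above matters because NVRAM content is
 * byte-addressed while the MCP data register returns it in CPU order;
 * cpu_to_be32() restores NVRAM byte order so the memcpy() into the caller's
 * buffer preserves it. A standalone equivalent for a little-endian host
 * (sketch, not driver code): */

#include <stdint.h>

static uint32_t to_nvram_byte_order(uint32_t cpu_val)
{
	/* NVRAM bytes 12 34 56 78 read back as 0x78563412 on a
	 * little-endian host; swapping puts 12 34 56 78 in memory */
	return ((cpu_val & 0x000000ffu) << 24) |
	       ((cpu_val & 0x0000ff00u) << 8)  |
	       ((cpu_val & 0x00ff0000u) >> 8)  |
	       ((cpu_val & 0xff000000u) >> 24);
}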
7942
7943static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7944 int buf_size)
7945{
7946 int rc;
7947 u32 cmd_flags;
7948 u32 val;
7949
7950 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7951 DP(BNX2X_MSG_NVM,
c14423fe 7952 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7953 offset, buf_size);
7954 return -EINVAL;
7955 }
7956
34f80b04
EG
7957 if (offset + buf_size > bp->common.flash_size) {
7958 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7959 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7960 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7961 return -EINVAL;
7962 }
7963
7964 /* request access to nvram interface */
7965 rc = bnx2x_acquire_nvram_lock(bp);
7966 if (rc)
7967 return rc;
7968
7969 /* enable access to nvram interface */
7970 bnx2x_enable_nvram_access(bp);
7971
7972 /* read the first word(s) */
7973 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7974 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7975 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7976 memcpy(ret_buf, &val, 4);
7977
7978 /* advance to the next dword */
7979 offset += sizeof(u32);
7980 ret_buf += sizeof(u32);
7981 buf_size -= sizeof(u32);
7982 cmd_flags = 0;
7983 }
7984
7985 if (rc == 0) {
7986 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7987 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7988 memcpy(ret_buf, &val, 4);
7989 }
7990
7991 /* disable access to nvram interface */
7992 bnx2x_disable_nvram_access(bp);
7993 bnx2x_release_nvram_lock(bp);
7994
7995 return rc;
7996}
7997
7998static int bnx2x_get_eeprom(struct net_device *dev,
7999 struct ethtool_eeprom *eeprom, u8 *eebuf)
8000{
8001 struct bnx2x *bp = netdev_priv(dev);
8002 int rc;
8003
34f80b04 8004 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8005 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8006 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8007 eeprom->len, eeprom->len);
8008
8009 /* parameters already validated in ethtool_get_eeprom */
8010
8011 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8012
8013 return rc;
8014}
8015
8016static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8017 u32 cmd_flags)
8018{
f1410647 8019 int count, i, rc;
a2fbb9ea
ET
8020
8021 /* build the command word */
8022 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8023
8024 /* need to clear DONE bit separately */
8025 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8026
8027 /* write the data */
8028 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8029
8030 /* address of the NVRAM to write to */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8032 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8033
8034 /* issue the write command */
8035 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8036
8037 /* adjust timeout for emulation/FPGA */
8038 count = NVRAM_TIMEOUT_COUNT;
8039 if (CHIP_REV_IS_SLOW(bp))
8040 count *= 100;
8041
8042 /* wait for completion */
8043 rc = -EBUSY;
8044 for (i = 0; i < count; i++) {
8045 udelay(5);
8046 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8047 if (val & MCPR_NVM_COMMAND_DONE) {
8048 rc = 0;
8049 break;
8050 }
8051 }
8052
8053 return rc;
8054}
8055
f1410647 8056#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8057
8058static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8059 int buf_size)
8060{
8061 int rc;
8062 u32 cmd_flags;
8063 u32 align_offset;
8064 u32 val;
8065
34f80b04
EG
8066 if (offset + buf_size > bp->common.flash_size) {
8067 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8068 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8069 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8070 return -EINVAL;
8071 }
8072
8073 /* request access to nvram interface */
8074 rc = bnx2x_acquire_nvram_lock(bp);
8075 if (rc)
8076 return rc;
8077
8078 /* enable access to nvram interface */
8079 bnx2x_enable_nvram_access(bp);
8080
8081 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8082 align_offset = (offset & ~0x03);
8083 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8084
8085 if (rc == 0) {
8086 val &= ~(0xff << BYTE_OFFSET(offset));
8087 val |= (*data_buf << BYTE_OFFSET(offset));
8088
8089		/* nvram data is returned as an array of bytes;
8090		 * convert it back to cpu order */
8091 val = be32_to_cpu(val);
8092
a2fbb9ea
ET
8093 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8094 cmd_flags);
8095 }
8096
8097 /* disable access to nvram interface */
8098 bnx2x_disable_nvram_access(bp);
8099 bnx2x_release_nvram_lock(bp);
8100
8101 return rc;
8102}
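
/* The single-byte path above is a read-modify-write: writing a byte at NVRAM
 * offset 6 aligns to dword offset 4, and BYTE_OFFSET(6) = 8 * (6 & 3) = 16
 * places the byte in bits 16-23 of the dword before it is written back.
 * The patch step as a standalone sketch: */

#include <stdint.h>

static uint32_t patch_nvram_byte(uint32_t dword, uint32_t offset, uint8_t b)
{
	uint32_t shift = 8 * (offset & 0x03);	/* same as BYTE_OFFSET() */

	dword &= ~(0xffu << shift);		/* clear the target byte */
	return dword | ((uint32_t)b << shift);	/* splice in the new one */
}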
8103
8104static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8105 int buf_size)
8106{
8107 int rc;
8108 u32 cmd_flags;
8109 u32 val;
8110 u32 written_so_far;
8111
34f80b04 8112 if (buf_size == 1) /* ethtool */
a2fbb9ea 8113 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8114
8115 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8116 DP(BNX2X_MSG_NVM,
c14423fe 8117 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8118 offset, buf_size);
8119 return -EINVAL;
8120 }
8121
34f80b04
EG
8122 if (offset + buf_size > bp->common.flash_size) {
8123 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8124 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8125 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8126 return -EINVAL;
8127 }
8128
8129 /* request access to nvram interface */
8130 rc = bnx2x_acquire_nvram_lock(bp);
8131 if (rc)
8132 return rc;
8133
8134 /* enable access to nvram interface */
8135 bnx2x_enable_nvram_access(bp);
8136
8137 written_so_far = 0;
8138 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8139 while ((written_so_far < buf_size) && (rc == 0)) {
8140 if (written_so_far == (buf_size - sizeof(u32)))
8141 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8142 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8143 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8144 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8145 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8146
8147 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8148
8149 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8150
8151 /* advance to the next dword */
8152 offset += sizeof(u32);
8153 data_buf += sizeof(u32);
8154 written_so_far += sizeof(u32);
8155 cmd_flags = 0;
8156 }
8157
8158 /* disable access to nvram interface */
8159 bnx2x_disable_nvram_access(bp);
8160 bnx2x_release_nvram_lock(bp);
8161
8162 return rc;
8163}
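
/* The flag logic in the write loop above marks each dword with FIRST at the
 * start of an NVRAM page and LAST at page ends and at the end of the buffer,
 * so the MCP sees well-formed page bursts. The same predicate pulled into a
 * hypothetical helper: */

static u32 nvram_burst_flags(u32 offset, u32 written, u32 buf_size)
{
	u32 flags = 0;

	if ((written == 0) || ((offset % NVRAM_PAGE_SIZE) == 0))
		flags |= MCPR_NVM_COMMAND_FIRST;

	if ((written == buf_size - 4) ||
	    (((offset + 4) % NVRAM_PAGE_SIZE) == 0))
		flags |= MCPR_NVM_COMMAND_LAST;

	return flags;
}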
8164
8165static int bnx2x_set_eeprom(struct net_device *dev,
8166 struct ethtool_eeprom *eeprom, u8 *eebuf)
8167{
8168 struct bnx2x *bp = netdev_priv(dev);
8169 int rc;
8170
34f80b04 8171 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8172 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8173 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8174 eeprom->len, eeprom->len);
8175
8176 /* parameters already validated in ethtool_set_eeprom */
8177
c18487ee 8178 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8179 if (eeprom->magic == 0x00504859)
8180 if (bp->port.pmf) {
8181
4a37fb66 8182 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8183 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8184 bp->link_params.ext_phy_config,
8185 (bp->state != BNX2X_STATE_CLOSED),
8186 eebuf, eeprom->len);
bb2a0f7a
YG
8187 if ((bp->state == BNX2X_STATE_OPEN) ||
8188 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8189 rc |= bnx2x_link_reset(&bp->link_params,
8190 &bp->link_vars);
8191 rc |= bnx2x_phy_init(&bp->link_params,
8192 &bp->link_vars);
bb2a0f7a 8193 }
4a37fb66 8194 bnx2x_release_phy_lock(bp);
34f80b04
EG
8195
8196 } else /* Only the PMF can access the PHY */
8197 return -EINVAL;
8198 else
c18487ee 8199 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8200
8201 return rc;
8202}
8203
8204static int bnx2x_get_coalesce(struct net_device *dev,
8205 struct ethtool_coalesce *coal)
8206{
8207 struct bnx2x *bp = netdev_priv(dev);
8208
8209 memset(coal, 0, sizeof(struct ethtool_coalesce));
8210
8211 coal->rx_coalesce_usecs = bp->rx_ticks;
8212 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8213
8214 return 0;
8215}
8216
8217static int bnx2x_set_coalesce(struct net_device *dev,
8218 struct ethtool_coalesce *coal)
8219{
8220 struct bnx2x *bp = netdev_priv(dev);
8221
8222 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8223 if (bp->rx_ticks > 3000)
8224 bp->rx_ticks = 3000;
8225
8226 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8227 if (bp->tx_ticks > 0x3000)
8228 bp->tx_ticks = 0x3000;
8229
34f80b04 8230 if (netif_running(dev))
a2fbb9ea
ET
8231 bnx2x_update_coalesce(bp);
8232
8233 return 0;
8234}
8235
7a9b2557
VZ
8236static int bnx2x_set_flags(struct net_device *dev, u32 data)
8237{
8238 struct bnx2x *bp = netdev_priv(dev);
8239 int changed = 0;
8240 int rc = 0;
8241
8242 if (data & ETH_FLAG_LRO) {
8243 if (!(dev->features & NETIF_F_LRO)) {
8244 dev->features |= NETIF_F_LRO;
8245 bp->flags |= TPA_ENABLE_FLAG;
8246 changed = 1;
8247 }
8248
8249 } else if (dev->features & NETIF_F_LRO) {
8250 dev->features &= ~NETIF_F_LRO;
8251 bp->flags &= ~TPA_ENABLE_FLAG;
8252 changed = 1;
8253 }
8254
8255 if (changed && netif_running(dev)) {
8256 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8257 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8258 }
8259
8260 return rc;
8261}
8262
a2fbb9ea
ET
8263static void bnx2x_get_ringparam(struct net_device *dev,
8264 struct ethtool_ringparam *ering)
8265{
8266 struct bnx2x *bp = netdev_priv(dev);
8267
8268 ering->rx_max_pending = MAX_RX_AVAIL;
8269 ering->rx_mini_max_pending = 0;
8270 ering->rx_jumbo_max_pending = 0;
8271
8272 ering->rx_pending = bp->rx_ring_size;
8273 ering->rx_mini_pending = 0;
8274 ering->rx_jumbo_pending = 0;
8275
8276 ering->tx_max_pending = MAX_TX_AVAIL;
8277 ering->tx_pending = bp->tx_ring_size;
8278}
8279
8280static int bnx2x_set_ringparam(struct net_device *dev,
8281 struct ethtool_ringparam *ering)
8282{
8283 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8284 int rc = 0;
a2fbb9ea
ET
8285
8286 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8287 (ering->tx_pending > MAX_TX_AVAIL) ||
8288 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8289 return -EINVAL;
8290
8291 bp->rx_ring_size = ering->rx_pending;
8292 bp->tx_ring_size = ering->tx_pending;
8293
34f80b04
EG
8294 if (netif_running(dev)) {
8295 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8296 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8297 }
8298
34f80b04 8299 return rc;
a2fbb9ea
ET
8300}
8301
8302static void bnx2x_get_pauseparam(struct net_device *dev,
8303 struct ethtool_pauseparam *epause)
8304{
8305 struct bnx2x *bp = netdev_priv(dev);
8306
c18487ee
YR
8307 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8308 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8309
8310 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8311 FLOW_CTRL_RX);
8312 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8313 FLOW_CTRL_TX);
a2fbb9ea
ET
8314
8315 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8316 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8317 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8318}
8319
8320static int bnx2x_set_pauseparam(struct net_device *dev,
8321 struct ethtool_pauseparam *epause)
8322{
8323 struct bnx2x *bp = netdev_priv(dev);
8324
34f80b04
EG
8325 if (IS_E1HMF(bp))
8326 return 0;
8327
a2fbb9ea
ET
8328 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8329 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8330 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8331
c18487ee 8332 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8333
f1410647 8334 if (epause->rx_pause)
c18487ee
YR
8335 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8336
f1410647 8337 if (epause->tx_pause)
c18487ee
YR
8338 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8339
8340 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8341 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8342
c18487ee 8343 if (epause->autoneg) {
34f80b04 8344 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8345 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8346 return -EINVAL;
8347 }
a2fbb9ea 8348
c18487ee
YR
8349 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8350 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8351 }
a2fbb9ea 8352
c18487ee
YR
8353 DP(NETIF_MSG_LINK,
8354 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8355
8356 if (netif_running(dev)) {
bb2a0f7a 8357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8358 bnx2x_link_set(bp);
8359 }
a2fbb9ea
ET
8360
8361 return 0;
8362}
8363
8364static u32 bnx2x_get_rx_csum(struct net_device *dev)
8365{
8366 struct bnx2x *bp = netdev_priv(dev);
8367
8368 return bp->rx_csum;
8369}
8370
8371static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8372{
8373 struct bnx2x *bp = netdev_priv(dev);
8374
8375 bp->rx_csum = data;
8376 return 0;
8377}
8378
8379static int bnx2x_set_tso(struct net_device *dev, u32 data)
8380{
755735eb 8381 if (data) {
a2fbb9ea 8382 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8383 dev->features |= NETIF_F_TSO6;
8384 } else {
a2fbb9ea 8385 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8386 dev->features &= ~NETIF_F_TSO6;
8387 }
8388
a2fbb9ea
ET
8389 return 0;
8390}
8391
f3c87cdd 8392static const struct {
a2fbb9ea
ET
8393 char string[ETH_GSTRING_LEN];
8394} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8395 { "register_test (offline)" },
8396 { "memory_test (offline)" },
8397 { "loopback_test (offline)" },
8398 { "nvram_test (online)" },
8399 { "interrupt_test (online)" },
8400 { "link_test (online)" },
8401 { "idle check (online)" },
8402 { "MC errors (online)" }
a2fbb9ea
ET
8403};
8404
8405static int bnx2x_self_test_count(struct net_device *dev)
8406{
8407 return BNX2X_NUM_TESTS;
8408}
8409
f3c87cdd
YG
8410static int bnx2x_test_registers(struct bnx2x *bp)
8411{
8412 int idx, i, rc = -ENODEV;
8413 u32 wr_val = 0;
8414 static const struct {
8415 u32 offset0;
8416 u32 offset1;
8417 u32 mask;
8418 } reg_tbl[] = {
8419/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8420 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8421 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8422 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8423 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8424 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8425 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8426 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8427 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8428 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8429/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8430 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8431 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8432 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8433 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8434 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8435 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8436 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8437 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8438 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8439/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8440 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8441 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8442 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8443 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8444 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8445 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8446 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8447 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8448 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8449/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8450 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8451 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8452 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8453 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8454 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8455 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8456 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8457
8458 { 0xffffffff, 0, 0x00000000 }
8459 };
8460
8461 if (!netif_running(bp->dev))
8462 return rc;
8463
8464	/* Run the test twice:
8465	   first by writing 0x00000000, then by writing 0xffffffff */
8466 for (idx = 0; idx < 2; idx++) {
8467
8468 switch (idx) {
8469 case 0:
8470 wr_val = 0;
8471 break;
8472 case 1:
8473 wr_val = 0xffffffff;
8474 break;
8475 }
8476
8477 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8478 u32 offset, mask, save_val, val;
8479 int port = BP_PORT(bp);
8480
8481 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8482 mask = reg_tbl[i].mask;
8483
8484 save_val = REG_RD(bp, offset);
8485
8486 REG_WR(bp, offset, wr_val);
8487 val = REG_RD(bp, offset);
8488
8489 /* Restore the original register's value */
8490 REG_WR(bp, offset, save_val);
8491
8492			/* verify that the value read back is as expected */
8493 if ((val & mask) != (wr_val & mask))
8494 goto test_reg_exit;
8495 }
8496 }
8497
8498 rc = 0;
8499
8500test_reg_exit:
8501 return rc;
8502}
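
/* Each entry in the register walk above is save / write-pattern / read-back /
 * restore, with the table mask selecting only the implemented bits for
 * comparison. The per-register step as a minimal sketch: */

static int check_one_reg(struct bnx2x *bp, u32 offset, u32 mask, u32 pattern)
{
	u32 save = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, pattern);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, save);	/* restore before judging */

	return ((val & mask) == (pattern & mask)) ? 0 : -ENODEV;
}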
8503
8504static int bnx2x_test_memory(struct bnx2x *bp)
8505{
8506 int i, j, rc = -ENODEV;
8507 u32 val;
8508 static const struct {
8509 u32 offset;
8510 int size;
8511 } mem_tbl[] = {
8512 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8513 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8514 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8515 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8516 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8517 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8518 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8519
8520 { 0xffffffff, 0 }
8521 };
8522 static const struct {
8523 char *name;
8524 u32 offset;
8525 u32 mask;
8526 } prty_tbl[] = {
8527 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8528 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8529 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8530 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8531 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8532 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8533
8534 { NULL, 0xffffffff, 0 }
8535 };
8536
8537 if (!netif_running(bp->dev))
8538 return rc;
8539
8540 /* Go through all the memories */
8541 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8542 for (j = 0; j < mem_tbl[i].size; j++)
8543 REG_RD(bp, mem_tbl[i].offset + j*4);
8544
8545 /* Check the parity status */
8546 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8547 val = REG_RD(bp, prty_tbl[i].offset);
8548 if (val & ~(prty_tbl[i].mask)) {
8549 DP(NETIF_MSG_HW,
8550 "%s is 0x%x\n", prty_tbl[i].name, val);
8551 goto test_mem_exit;
8552 }
8553 }
8554
8555 rc = 0;
8556
8557test_mem_exit:
8558 return rc;
8559}
8560
8561static void bnx2x_netif_start(struct bnx2x *bp)
8562{
8563 int i;
8564
8565 if (atomic_dec_and_test(&bp->intr_sem)) {
8566 if (netif_running(bp->dev)) {
8567 bnx2x_int_enable(bp);
8568 for_each_queue(bp, i)
8569 napi_enable(&bnx2x_fp(bp, i, napi));
8570 if (bp->state == BNX2X_STATE_OPEN)
8571 netif_wake_queue(bp->dev);
8572 }
8573 }
8574}
8575
8576static void bnx2x_netif_stop(struct bnx2x *bp)
8577{
8578 int i;
8579
8580 if (netif_running(bp->dev)) {
8581 netif_tx_disable(bp->dev);
8582 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8583 for_each_queue(bp, i)
8584 napi_disable(&bnx2x_fp(bp, i, napi));
8585 }
8586 bnx2x_int_disable_sync(bp);
8587}
8588
8589static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8590{
8591 int cnt = 1000;
8592
8593 if (link_up)
8594 while (bnx2x_link_test(bp) && cnt--)
8595 msleep(10);
8596}
8597
8598static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8599{
8600 unsigned int pkt_size, num_pkts, i;
8601 struct sk_buff *skb;
8602 unsigned char *packet;
8603 struct bnx2x_fastpath *fp = &bp->fp[0];
8604 u16 tx_start_idx, tx_idx;
8605 u16 rx_start_idx, rx_idx;
8606 u16 pkt_prod;
8607 struct sw_tx_bd *tx_buf;
8608 struct eth_tx_bd *tx_bd;
8609 dma_addr_t mapping;
8610 union eth_rx_cqe *cqe;
8611 u8 cqe_fp_flags;
8612 struct sw_rx_bd *rx_buf;
8613 u16 len;
8614 int rc = -ENODEV;
8615
8616 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8617 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8618 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8619 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8620 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8621
8622 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8623 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8624 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8625 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8626 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8627 /* wait until link state is restored */
8628 bnx2x_wait_for_link(bp, link_up);
8629
8630 } else
8631 return -EINVAL;
8632
8633 pkt_size = 1514;
8634 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8635 if (!skb) {
8636 rc = -ENOMEM;
8637 goto test_loopback_exit;
8638 }
8639 packet = skb_put(skb, pkt_size);
8640 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8641 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8642 for (i = ETH_HLEN; i < pkt_size; i++)
8643 packet[i] = (unsigned char) (i & 0xff);
8644
8645 num_pkts = 0;
8646 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8647 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8648
8649 pkt_prod = fp->tx_pkt_prod++;
8650 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8651 tx_buf->first_bd = fp->tx_bd_prod;
8652 tx_buf->skb = skb;
8653
8654 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8655 mapping = pci_map_single(bp->pdev, skb->data,
8656 skb_headlen(skb), PCI_DMA_TODEVICE);
8657 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8658 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8659 tx_bd->nbd = cpu_to_le16(1);
8660 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8661 tx_bd->vlan = cpu_to_le16(pkt_prod);
8662 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8663 ETH_TX_BD_FLAGS_END_BD);
8664 tx_bd->general_data = ((UNICAST_ADDRESS <<
8665 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8666
8667 fp->hw_tx_prods->bds_prod =
8668 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8669 mb(); /* FW restriction: must not reorder writing nbd and packets */
8670 fp->hw_tx_prods->packets_prod =
8671 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8672 DOORBELL(bp, FP_IDX(fp), 0);
8673
8674 mmiowb();
8675
8676 num_pkts++;
8677 fp->tx_bd_prod++;
8678 bp->dev->trans_start = jiffies;
8679
8680 udelay(100);
8681
8682 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8683 if (tx_idx != tx_start_idx + num_pkts)
8684 goto test_loopback_exit;
8685
8686 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8687 if (rx_idx != rx_start_idx + num_pkts)
8688 goto test_loopback_exit;
8689
8690 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8691 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8692 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8693 goto test_loopback_rx_exit;
8694
8695 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8696 if (len != pkt_size)
8697 goto test_loopback_rx_exit;
8698
8699 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8700 skb = rx_buf->skb;
8701 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8702 for (i = ETH_HLEN; i < pkt_size; i++)
8703 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8704 goto test_loopback_rx_exit;
8705
8706 rc = 0;
8707
8708test_loopback_rx_exit:
8709 bp->dev->last_rx = jiffies;
8710
8711 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8712 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8713 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8714 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8715
8716 /* Update producers */
8717 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8718 fp->rx_sge_prod);
8719 mmiowb(); /* keep prod updates ordered */
8720
8721test_loopback_exit:
8722 bp->link_params.loopback_mode = LOOPBACK_NONE;
8723
8724 return rc;
8725}
8726
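/* Wrapper for the two loopback flavours: interrupts and NAPI are quiesced
 * around the runs, and the per-mode failure bits are ORed into the return
 * value so ethtool can report which mode failed.
 */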
8727static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8728{
8729 int rc = 0;
8730
8731 if (!netif_running(bp->dev))
8732 return BNX2X_LOOPBACK_FAILED;
8733
8734 bnx2x_netif_stop(bp);
8735
8736 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8737 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8738 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8739 }
8740
8741 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8742 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8743 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8744 }
8745
8746 bnx2x_netif_start(bp);
8747
8748 return rc;
8749}
8750
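/* Each NVRAM region below includes its own stored CRC32, so computing
 * ether_crc_le() over the entire region (data plus stored CRC) must yield
 * the fixed CRC32 residual constant; any other value means corruption.
 */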
8751#define CRC32_RESIDUAL 0xdebb20e3
8752
8753static int bnx2x_test_nvram(struct bnx2x *bp)
8754{
8755 static const struct {
8756 int offset;
8757 int size;
8758 } nvram_tbl[] = {
8759 { 0, 0x14 }, /* bootstrap */
8760 { 0x14, 0xec }, /* dir */
8761 { 0x100, 0x350 }, /* manuf_info */
8762 { 0x450, 0xf0 }, /* feature_info */
8763 { 0x640, 0x64 }, /* upgrade_key_info */
8764 { 0x6a4, 0x64 },
8765 { 0x708, 0x70 }, /* manuf_key_info */
8766 { 0x778, 0x70 },
8767 { 0, 0 }
8768 };
8769 u32 buf[0x350 / 4];
8770 u8 *data = (u8 *)buf;
8771 int i, rc;
8772 u32 magic, csum;
8773
8774 rc = bnx2x_nvram_read(bp, 0, data, 4);
8775 if (rc) {
8776 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8777 goto test_nvram_exit;
8778 }
8779
8780 magic = be32_to_cpu(buf[0]);
8781 if (magic != 0x669955aa) {
8782 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8783 rc = -ENODEV;
8784 goto test_nvram_exit;
8785 }
8786
8787 for (i = 0; nvram_tbl[i].size; i++) {
8788
8789 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8790 nvram_tbl[i].size);
8791 if (rc) {
8792 DP(NETIF_MSG_PROBE,
8793 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8794 goto test_nvram_exit;
8795 }
8796
8797 csum = ether_crc_le(nvram_tbl[i].size, data);
8798 if (csum != CRC32_RESIDUAL) {
8799 DP(NETIF_MSG_PROBE,
8800 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8801 rc = -ENODEV;
8802 goto test_nvram_exit;
8803 }
8804 }
8805
8806test_nvram_exit:
8807 return rc;
8808}
8809
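/* Interrupt test: post a harmless (zero-length) SET_MAC ramrod on the
 * slowpath ring and wait up to ~100 ms for its completion interrupt to
 * clear set_mac_pending; if it never arrives, the interrupt path is
 * considered broken.
 */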
8810static int bnx2x_test_intr(struct bnx2x *bp)
8811{
8812 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8813 int i, rc;
8814
8815 if (!netif_running(bp->dev))
8816 return -ENODEV;
8817
8818 config->hdr.length_6b = 0;
8819 config->hdr.offset = 0;
8820 config->hdr.client_id = BP_CL_ID(bp);
8821 config->hdr.reserved1 = 0;
8822
8823 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8824 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8825 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8826 if (rc == 0) {
8827 bp->set_mac_pending++;
8828 for (i = 0; i < 10; i++) {
8829 if (!bp->set_mac_pending)
8830 break;
8831 msleep_interruptible(10);
8832 }
8833 if (i == 10)
8834 rc = -ENODEV;
8835 }
8836
8837 return rc;
8838}
8839
a2fbb9ea
ET
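/* ethtool self-test entry point.  Result slots: buf[0] registers,
 * buf[1] memory, buf[2] loopback, buf[3] NVRAM, buf[4] interrupt,
 * buf[5] link (PMF only), buf[7] microcode asserts.  Offline tests
 * reload the NIC in diagnostic mode and are skipped in E1H MF mode.
 */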
8840static void bnx2x_self_test(struct net_device *dev,
8841 struct ethtool_test *etest, u64 *buf)
8842{
8843 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8844
8845 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8846
f3c87cdd 8847 if (!netif_running(dev))
a2fbb9ea 8848 return;
a2fbb9ea 8849
f3c87cdd
YG
8850 /* offline tests are not supported in MF mode */
8851 if (IS_E1HMF(bp))
8852 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8853
8854 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8855 u8 link_up;
8856
8857 link_up = bp->link_vars.link_up;
8858 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8859 bnx2x_nic_load(bp, LOAD_DIAG);
8860 /* wait until link state is restored */
8861 bnx2x_wait_for_link(bp, link_up);
8862
8863 if (bnx2x_test_registers(bp) != 0) {
8864 buf[0] = 1;
8865 etest->flags |= ETH_TEST_FL_FAILED;
8866 }
8867 if (bnx2x_test_memory(bp) != 0) {
8868 buf[1] = 1;
8869 etest->flags |= ETH_TEST_FL_FAILED;
8870 }
8871 buf[2] = bnx2x_test_loopback(bp, link_up);
8872 if (buf[2] != 0)
8873 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8874
f3c87cdd
YG
8875 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8876 bnx2x_nic_load(bp, LOAD_NORMAL);
8877 /* wait until link state is restored */
8878 bnx2x_wait_for_link(bp, link_up);
8879 }
8880 if (bnx2x_test_nvram(bp) != 0) {
8881 buf[3] = 1;
a2fbb9ea
ET
8882 etest->flags |= ETH_TEST_FL_FAILED;
8883 }
f3c87cdd
YG
8884 if (bnx2x_test_intr(bp) != 0) {
8885 buf[4] = 1;
8886 etest->flags |= ETH_TEST_FL_FAILED;
8887 }
8888 if (bp->port.pmf)
8889 if (bnx2x_link_test(bp) != 0) {
8890 buf[5] = 1;
8891 etest->flags |= ETH_TEST_FL_FAILED;
8892 }
8893 buf[7] = bnx2x_mc_assert(bp);
8894 if (buf[7] != 0)
8895 etest->flags |= ETH_TEST_FL_FAILED;
8896
8897#ifdef BNX2X_EXTRA_DEBUG
8898 bnx2x_panic_dump(bp);
8899#endif
a2fbb9ea
ET
8900}
8901
bb2a0f7a
YG
8902static const struct {
8903 long offset;
8904 int size;
8905 u32 flags;
66e855f3
YG
8906#define STATS_FLAGS_PORT 1
8907#define STATS_FLAGS_FUNC 2
8908 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8909} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
66e855f3
YG
8910/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8911 8, STATS_FLAGS_FUNC, "rx_bytes" },
8912 { STATS_OFFSET32(error_bytes_received_hi),
8913 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8914 { STATS_OFFSET32(total_bytes_transmitted_hi),
8915 8, STATS_FLAGS_FUNC, "tx_bytes" },
8916 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8917 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8918 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8919 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8920 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8921 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8922 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 8923 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 8924 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 8925 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 8926 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 8927 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 8928/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 8929 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 8930 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 8931 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 8932 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 8933 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 8934 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 8935 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 8936 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 8937 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 8938 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 8939 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 8940 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 8941 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 8942 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 8943 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 8944 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 8945 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 8946 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
66e855f3
YG
8947 8, STATS_FLAGS_PORT, "rx_fragments" },
8948/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8949 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 8950 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 8951 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 8952 { STATS_OFFSET32(jabber_packets_received),
66e855f3 8953 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 8954 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 8955 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 8956 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 8957 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 8958 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 8959 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 8960 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 8961 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 8962 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 8963 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 8964 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 8965 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 8966 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 8967 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 8968/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 8969 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 8970 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
66e855f3
YG
8971 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8972 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8973 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8974 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8975 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 8976 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
66e855f3
YG
8977 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8978 { STATS_OFFSET32(mac_filter_discard),
8979 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8980 { STATS_OFFSET32(no_buff_discard),
8981 4, STATS_FLAGS_FUNC, "rx_discards" },
8982 { STATS_OFFSET32(xxoverflow_discard),
8983 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8984 { STATS_OFFSET32(brb_drop_hi),
8985 8, STATS_FLAGS_PORT, "brb_discard" },
8986 { STATS_OFFSET32(brb_truncate_hi),
8987 8, STATS_FLAGS_PORT, "brb_truncate" },
8988/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8989 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8990 { STATS_OFFSET32(rx_skb_alloc_failed),
8991 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8992/* 42 */{ STATS_OFFSET32(hw_csum_err),
8993 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
a2fbb9ea
ET
8994};
8995
66e855f3
YG
8996#define IS_NOT_E1HMF_STAT(bp, i) \
8997 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8998
a2fbb9ea
ET
8999static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9000{
bb2a0f7a
YG
9001 struct bnx2x *bp = netdev_priv(dev);
9002 int i, j;
9003
a2fbb9ea
ET
9004 switch (stringset) {
9005 case ETH_SS_STATS:
bb2a0f7a 9006 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9007 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9008 continue;
9009 strcpy(buf + j*ETH_GSTRING_LEN,
9010 bnx2x_stats_arr[i].string);
9011 j++;
9012 }
a2fbb9ea
ET
9013 break;
9014
9015 case ETH_SS_TEST:
9016 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9017 break;
9018 }
9019}
9020
9021static int bnx2x_get_stats_count(struct net_device *dev)
9022{
bb2a0f7a
YG
9023 struct bnx2x *bp = netdev_priv(dev);
9024 int i, num_stats = 0;
9025
9026 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9027 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9028 continue;
9029 num_stats++;
9030 }
9031 return num_stats;
a2fbb9ea
ET
9032}
9033
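/* Copy the statistics out of bp->eth_stats: 4-byte counters are widened
 * to u64, 8-byte counters are reassembled from their hi/lo 32-bit halves
 * with HILO_U64, and port-only counters are skipped in E1H MF mode.
 */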
9034static void bnx2x_get_ethtool_stats(struct net_device *dev,
9035 struct ethtool_stats *stats, u64 *buf)
9036{
9037 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
9038 u32 *hw_stats = (u32 *)&bp->eth_stats;
9039 int i, j;
a2fbb9ea 9040
bb2a0f7a 9041 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9042 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9043 continue;
bb2a0f7a
YG
9044
9045 if (bnx2x_stats_arr[i].size == 0) {
9046 /* skip this counter */
9047 buf[j] = 0;
9048 j++;
a2fbb9ea
ET
9049 continue;
9050 }
bb2a0f7a 9051 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9052 /* 4-byte counter */
bb2a0f7a
YG
9053 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9054 j++;
a2fbb9ea
ET
9055 continue;
9056 }
9057 /* 8-byte counter */
bb2a0f7a
YG
9058 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9059 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9060 j++;
a2fbb9ea
ET
9061 }
9062}
9063
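/* ethtool LED blink (ethtool -p): toggle the port LED between operational
 * and off every 500 ms for 'data' seconds (default 2), then restore the
 * LED to match the current link state.  PMF only.
 */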
9064static int bnx2x_phys_id(struct net_device *dev, u32 data)
9065{
9066 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9067 int port = BP_PORT(bp);
a2fbb9ea
ET
9068 int i;
9069
34f80b04
EG
9070 if (!netif_running(dev))
9071 return 0;
9072
9073 if (!bp->port.pmf)
9074 return 0;
9075
a2fbb9ea
ET
9076 if (data == 0)
9077 data = 2;
9078
9079 for (i = 0; i < (data * 2); i++) {
c18487ee 9080 if ((i % 2) == 0)
34f80b04 9081 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9082 bp->link_params.hw_led_mode,
9083 bp->link_params.chip_id);
9084 else
34f80b04 9085 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9086 bp->link_params.hw_led_mode,
9087 bp->link_params.chip_id);
9088
a2fbb9ea
ET
9089 msleep_interruptible(500);
9090 if (signal_pending(current))
9091 break;
9092 }
9093
c18487ee 9094 if (bp->link_vars.link_up)
34f80b04 9095 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9096 bp->link_vars.line_speed,
9097 bp->link_params.hw_led_mode,
9098 bp->link_params.chip_id);
a2fbb9ea
ET
9099
9100 return 0;
9101}
9102
9103static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9104 .get_settings = bnx2x_get_settings,
9105 .set_settings = bnx2x_set_settings,
9106 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9107 .get_wol = bnx2x_get_wol,
9108 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9109 .get_msglevel = bnx2x_get_msglevel,
9110 .set_msglevel = bnx2x_set_msglevel,
9111 .nway_reset = bnx2x_nway_reset,
9112 .get_link = ethtool_op_get_link,
9113 .get_eeprom_len = bnx2x_get_eeprom_len,
9114 .get_eeprom = bnx2x_get_eeprom,
9115 .set_eeprom = bnx2x_set_eeprom,
9116 .get_coalesce = bnx2x_get_coalesce,
9117 .set_coalesce = bnx2x_set_coalesce,
9118 .get_ringparam = bnx2x_get_ringparam,
9119 .set_ringparam = bnx2x_set_ringparam,
9120 .get_pauseparam = bnx2x_get_pauseparam,
9121 .set_pauseparam = bnx2x_set_pauseparam,
9122 .get_rx_csum = bnx2x_get_rx_csum,
9123 .set_rx_csum = bnx2x_set_rx_csum,
9124 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9125 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9126 .set_flags = bnx2x_set_flags,
9127 .get_flags = ethtool_op_get_flags,
9128 .get_sg = ethtool_op_get_sg,
9129 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9130 .get_tso = ethtool_op_get_tso,
9131 .set_tso = bnx2x_set_tso,
9132 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9133 .self_test = bnx2x_self_test,
9134 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9135 .phys_id = bnx2x_phys_id,
9136 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9137 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9138};
9139
9140/* end of ethtool_ops */
9141
9142/****************************************************************************
9143* General service functions
9144****************************************************************************/
9145
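/* Program the PCI PM control/status register directly: D0 clears the
 * state field and acks PME status (with the mandatory delay when coming
 * out of D3hot); D3hot sets state 3 and, if WoL is enabled, PME_ENABLE.
 * No register access is allowed after entering D3hot.
 */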
9146static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9147{
9148 u16 pmcsr;
9149
9150 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9151
9152 switch (state) {
9153 case PCI_D0:
34f80b04 9154 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9155 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9156 PCI_PM_CTRL_PME_STATUS));
9157
9158 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9159 /* delay required during transition out of D3hot */
9160 msleep(20);
34f80b04 9161 break;
a2fbb9ea 9162
34f80b04
EG
9163 case PCI_D3hot:
9164 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9165 pmcsr |= 3;
a2fbb9ea 9166
34f80b04
EG
9167 if (bp->wol)
9168 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9169
34f80b04
EG
9170 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9171 pmcsr);
a2fbb9ea 9172
34f80b04
EG
9173 /* No more memory access after this point until
9174 * device is brought back to D0.
9175 */
9176 break;
9177
9178 default:
9179 return -EINVAL;
9180 }
9181 return 0;
a2fbb9ea
ET
9182}
9183
34f80b04
EG
9184/*
9185 * net_device service functions
9186 */
9187
a2fbb9ea
ET
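/* NAPI poll: refresh the fastpath status block indices, service Tx
 * completions and up to 'budget' Rx packets, and only re-enable the IGU
 * interrupt (via the CSTORM ack) once no work is left and the budget was
 * not exhausted.
 */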
9188static int bnx2x_poll(struct napi_struct *napi, int budget)
9189{
9190 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9191 napi);
9192 struct bnx2x *bp = fp->bp;
9193 int work_done = 0;
9194
9195#ifdef BNX2X_STOP_ON_ERROR
9196 if (unlikely(bp->panic))
34f80b04 9197 goto poll_panic;
a2fbb9ea
ET
9198#endif
9199
9200 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9201 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9202 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9203
9204 bnx2x_update_fpsb_idx(fp);
9205
da5a662a 9206 if (BNX2X_HAS_TX_WORK(fp))
a2fbb9ea
ET
9207 bnx2x_tx_int(fp, budget);
9208
da5a662a 9209 if (BNX2X_HAS_RX_WORK(fp))
a2fbb9ea
ET
9210 work_done = bnx2x_rx_int(fp, budget);
9211
da5a662a 9212 rmb(); /* BNX2X_HAS_WORK() reads the status block */
a2fbb9ea
ET
9213
9214 /* must not complete if we consumed full budget */
da5a662a 9215 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
9216
9217#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9218poll_panic:
a2fbb9ea
ET
9219#endif
9220 netif_rx_complete(bp->dev, napi);
9221
34f80b04 9222 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9223 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9224 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9225 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9226 }
a2fbb9ea
ET
9227 return work_done;
9228}
9229
755735eb
EG
9230
9231/* we split the first BD into headers and data BDs
9232 * to ease the pain of our fellow microcode engineers
9233 * we use one mapping for both BDs
9234 * So far this has only been observed to happen
9235 * in Other Operating Systems(TM)
9236 */
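/* Concretely: the first BD is trimmed to hlen header bytes, and a new
 * data BD is chained after the parsing BD, pointing at the same DMA
 * mapping offset by hlen for the remaining (old_len - hlen) bytes.
 */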
9237static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9238 struct bnx2x_fastpath *fp,
9239 struct eth_tx_bd **tx_bd, u16 hlen,
9240 u16 bd_prod, int nbd)
9241{
9242 struct eth_tx_bd *h_tx_bd = *tx_bd;
9243 struct eth_tx_bd *d_tx_bd;
9244 dma_addr_t mapping;
9245 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9246
9247 /* first fix first BD */
9248 h_tx_bd->nbd = cpu_to_le16(nbd);
9249 h_tx_bd->nbytes = cpu_to_le16(hlen);
9250
9251 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9252 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9253 h_tx_bd->addr_lo, h_tx_bd->nbd);
9254
9255 /* now get a new data BD
9256 * (after the pbd) and fill it */
9257 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9258 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9259
9260 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9261 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9262
9263 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9264 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9265 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9266 d_tx_bd->vlan = 0;
9267 /* this marks the BD as one that has no individual mapping
9268 * the FW ignores this flag in a BD not marked start
9269 */
9270 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9271 DP(NETIF_MSG_TX_QUEUED,
9272 "TSO split data size is %d (%x:%x)\n",
9273 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9274
9275 /* update tx_bd for marking the last BD flag */
9276 *tx_bd = d_tx_bd;
9277
9278 return bd_prod;
9279}
9280
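/* Adjust a hardware checksum when the stack's csum_start does not line up
 * with the transport header: add or subtract the partial checksum of the
 * 'fix' bytes in between, fold and complement the result, and byte-swap
 * it into the order the parsing BD expects.
 */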
9281static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9282{
9283 if (fix > 0)
9284 csum = (u16) ~csum_fold(csum_sub(csum,
9285 csum_partial(t_header - fix, fix, 0)));
9286
9287 else if (fix < 0)
9288 csum = (u16) ~csum_fold(csum_add(csum,
9289 csum_partial(t_header, -fix, 0)));
9290
9291 return swab16(csum);
9292}
9293
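/* Classify an outgoing skb into XMIT_* flags: XMIT_PLAIN when no checksum
 * offload is requested, XMIT_CSUM_V4/V6 (plus XMIT_CSUM_TCP for TCP) for
 * CHECKSUM_PARTIAL, and XMIT_GSO_V4/V6 when TSO is requested.
 */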
9294static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9295{
9296 u32 rc;
9297
9298 if (skb->ip_summed != CHECKSUM_PARTIAL)
9299 rc = XMIT_PLAIN;
9300
9301 else {
9302 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9303 rc = XMIT_CSUM_V6;
9304 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9305 rc |= XMIT_CSUM_TCP;
9306
9307 } else {
9308 rc = XMIT_CSUM_V4;
9309 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9310 rc |= XMIT_CSUM_TCP;
9311 }
9312 }
9313
9314 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9315 rc |= XMIT_GSO_V4;
9316
9317 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9318 rc |= XMIT_GSO_V6;
9319
9320 return rc;
9321}
9322
9323/* check if packet requires linearization (packet is too fragmented) */
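/* The check slides a window of (MAX_FETCH_BD - 3) consecutive BDs over the
 * fragments of an LSO skb: every such window must cover at least one MSS
 * worth of data, otherwise the FW could not build a segment from the BDs
 * it is able to fetch and the skb must be linearized.  Non-LSO skbs with
 * too many fragments are always linearized.
 */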
9324static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9325 u32 xmit_type)
9326{
9327 int to_copy = 0;
9328 int hlen = 0;
9329 int first_bd_sz = 0;
9330
9331 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9332 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9333
9334 if (xmit_type & XMIT_GSO) {
9335 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9336 /* Check if LSO packet needs to be copied:
9337 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9338 int wnd_size = MAX_FETCH_BD - 3;
9339 /* Number of windows to check */
9340 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9341 int wnd_idx = 0;
9342 int frag_idx = 0;
9343 u32 wnd_sum = 0;
9344
9345 /* Headers length */
9346 hlen = (int)(skb_transport_header(skb) - skb->data) +
9347 tcp_hdrlen(skb);
9348
9349 /* Amount of data (w/o headers) on linear part of SKB */
9350 first_bd_sz = skb_headlen(skb) - hlen;
9351
9352 wnd_sum = first_bd_sz;
9353
9354 /* Calculate the first sum - it's special */
9355 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9356 wnd_sum +=
9357 skb_shinfo(skb)->frags[frag_idx].size;
9358
9359 /* If there was data on linear skb data - check it */
9360 if (first_bd_sz > 0) {
9361 if (unlikely(wnd_sum < lso_mss)) {
9362 to_copy = 1;
9363 goto exit_lbl;
9364 }
9365
9366 wnd_sum -= first_bd_sz;
9367 }
9368
9369 /* Others are easier: run through the frag list and
9370 check all windows */
9371 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9372 wnd_sum +=
9373 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9374
9375 if (unlikely(wnd_sum < lso_mss)) {
9376 to_copy = 1;
9377 break;
9378 }
9379 wnd_sum -=
9380 skb_shinfo(skb)->frags[wnd_idx].size;
9381 }
9382
9383 } else {
9384 /* in non-LSO too fragmented packet should always
9385 be linearized */
9386 to_copy = 1;
9387 }
9388 }
9389
9390exit_lbl:
9391 if (unlikely(to_copy))
9392 DP(NETIF_MSG_TX_QUEUED,
9393 "Linearization IS REQUIRED for %s packet. "
9394 "num_frags %d hlen %d first_bd_sz %d\n",
9395 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9396 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9397
9398 return to_copy;
9399}
9400
9401/* called with netif_tx_lock
a2fbb9ea 9402 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9403 * netif_wake_queue()
a2fbb9ea
ET
9404 */
9405static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9406{
9407 struct bnx2x *bp = netdev_priv(dev);
9408 struct bnx2x_fastpath *fp;
9409 struct sw_tx_bd *tx_buf;
9410 struct eth_tx_bd *tx_bd;
9411 struct eth_tx_parse_bd *pbd = NULL;
9412 u16 pkt_prod, bd_prod;
755735eb 9413 int nbd, fp_index;
a2fbb9ea 9414 dma_addr_t mapping;
755735eb
EG
9415 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9416 int vlan_off = (bp->e1hov ? 4 : 0);
9417 int i;
9418 u8 hlen = 0;
a2fbb9ea
ET
9419
9420#ifdef BNX2X_STOP_ON_ERROR
9421 if (unlikely(bp->panic))
9422 return NETDEV_TX_BUSY;
9423#endif
9424
755735eb 9425 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9426 fp = &bp->fp[fp_index];
755735eb 9427
a2fbb9ea
ET
9428 if (unlikely(bnx2x_tx_avail(fp) <
9429 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9430 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9431 netif_stop_queue(dev);
9432 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9433 return NETDEV_TX_BUSY;
9434 }
9435
755735eb
EG
9436 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9437 " gso type %x xmit_type %x\n",
9438 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9439 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9440
9441 /* First, check if we need to linearize the skb
9442 (due to FW restrictions) */
9443 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9444 /* Statistics of linearization */
9445 bp->lin_cnt++;
9446 if (skb_linearize(skb) != 0) {
9447 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9448 "silently dropping this SKB\n");
9449 dev_kfree_skb_any(skb);
da5a662a 9450 return NETDEV_TX_OK;
755735eb
EG
9451 }
9452 }
9453
a2fbb9ea 9454 /*
755735eb 9455 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9456 then for TSO or xsum we have a parsing info BD,
755735eb 9457 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9458 (don't forget to mark the last one as last,
9459 and to unmap only AFTER you write to the BD ...)
755735eb 9460 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9461 */
9462
9463 pkt_prod = fp->tx_pkt_prod++;
755735eb 9464 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9465
755735eb 9466 /* get a tx_buf and first BD */
a2fbb9ea
ET
9467 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9468 tx_bd = &fp->tx_desc_ring[bd_prod];
9469
9470 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9471 tx_bd->general_data = (UNICAST_ADDRESS <<
9472 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9473 tx_bd->general_data |= 1; /* header nbd */
9474
755735eb
EG
9475 /* remember the first BD of the packet */
9476 tx_buf->first_bd = fp->tx_bd_prod;
9477 tx_buf->skb = skb;
a2fbb9ea
ET
9478
9479 DP(NETIF_MSG_TX_QUEUED,
9480 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9481 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9482
755735eb
EG
9483 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9484 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9485 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9486 vlan_off += 4;
9487 } else
9488 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9489
755735eb 9490 if (xmit_type) {
a2fbb9ea 9491
755735eb 9492 /* turn on parsing and get a BD */
a2fbb9ea
ET
9493 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9494 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9495
9496 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9497 }
9498
9499 if (xmit_type & XMIT_CSUM) {
9500 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9501
9502 /* for now NS flag is not used in Linux */
755735eb 9503 pbd->global_data = (hlen |
96fc1784 9504 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9505 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9506
755735eb
EG
9507 pbd->ip_hlen = (skb_transport_header(skb) -
9508 skb_network_header(skb)) / 2;
9509
9510 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9511
755735eb
EG
9512 pbd->total_hlen = cpu_to_le16(hlen);
9513 hlen = hlen*2 - vlan_off;
a2fbb9ea 9514
755735eb
EG
9515 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9516
9517 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9518 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9519 ETH_TX_BD_FLAGS_IP_CSUM;
9520 else
9521 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9522
9523 if (xmit_type & XMIT_CSUM_TCP) {
9524 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9525
9526 } else {
9527 s8 fix = SKB_CS_OFF(skb); /* signed! */
9528
a2fbb9ea 9529 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9530 pbd->cs_offset = fix / 2;
a2fbb9ea 9531
755735eb
EG
9532 DP(NETIF_MSG_TX_QUEUED,
9533 "hlen %d offset %d fix %d csum before fix %x\n",
9534 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9535 SKB_CS(skb));
9536
9537 /* HW bug: fixup the CSUM */
9538 pbd->tcp_pseudo_csum =
9539 bnx2x_csum_fix(skb_transport_header(skb),
9540 SKB_CS(skb), fix);
9541
9542 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9543 pbd->tcp_pseudo_csum);
9544 }
a2fbb9ea
ET
9545 }
9546
9547 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9548 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9549
9550 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9551 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9552 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9553 tx_bd->nbd = cpu_to_le16(nbd);
9554 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9555
9556 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9557 " nbytes %d flags %x vlan %x\n",
9558 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9559 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9560 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9561
755735eb 9562 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9563
9564 DP(NETIF_MSG_TX_QUEUED,
9565 "TSO packet len %d hlen %d total len %d tso size %d\n",
9566 skb->len, hlen, skb_headlen(skb),
9567 skb_shinfo(skb)->gso_size);
9568
9569 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9570
755735eb
EG
9571 if (unlikely(skb_headlen(skb) > hlen))
9572 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9573 bd_prod, ++nbd);
a2fbb9ea
ET
9574
9575 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9576 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9577 pbd->tcp_flags = pbd_tcp_flags(skb);
9578
9579 if (xmit_type & XMIT_GSO_V4) {
9580 pbd->ip_id = swab16(ip_hdr(skb)->id);
9581 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9582 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9583 ip_hdr(skb)->daddr,
9584 0, IPPROTO_TCP, 0));
755735eb
EG
9585
9586 } else
9587 pbd->tcp_pseudo_csum =
9588 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9589 &ipv6_hdr(skb)->daddr,
9590 0, IPPROTO_TCP, 0));
9591
a2fbb9ea
ET
9592 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9593 }
9594
755735eb
EG
9595 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9596 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9597
755735eb
EG
9598 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9599 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9600
755735eb
EG
9601 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9602 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9603
755735eb
EG
9604 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9605 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9606 tx_bd->nbytes = cpu_to_le16(frag->size);
9607 tx_bd->vlan = cpu_to_le16(pkt_prod);
9608 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9609
755735eb
EG
9610 DP(NETIF_MSG_TX_QUEUED,
9611 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9612 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9613 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9614 }
9615
755735eb 9616 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9617 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9618
9619 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9620 tx_bd, tx_bd->bd_flags.as_bitfield);
9621
a2fbb9ea
ET
9622 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9623
755735eb 9624 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9625 * if the packet contains or ends with it
9626 */
9627 if (TX_BD_POFF(bd_prod) < nbd)
9628 nbd++;
9629
9630 if (pbd)
9631 DP(NETIF_MSG_TX_QUEUED,
9632 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9633 " tcp_flags %x xsum %x seq %u hlen %u\n",
9634 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9635 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9636 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9637
755735eb 9638 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9639
96fc1784
ET
9640 fp->hw_tx_prods->bds_prod =
9641 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9642 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9643 fp->hw_tx_prods->packets_prod =
9644 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9645 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9646
9647 mmiowb();
9648
755735eb 9649 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9650 dev->trans_start = jiffies;
9651
9652 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9653 netif_stop_queue(dev);
bb2a0f7a 9654 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9655 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9656 netif_wake_queue(dev);
9657 }
9658 fp->tx_pkt++;
9659
9660 return NETDEV_TX_OK;
9661}
9662
bb2a0f7a 9663/* called with rtnl_lock */
a2fbb9ea
ET
9664static int bnx2x_open(struct net_device *dev)
9665{
9666 struct bnx2x *bp = netdev_priv(dev);
9667
9668 bnx2x_set_power_state(bp, PCI_D0);
9669
bb2a0f7a 9670 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9671}
9672
bb2a0f7a 9673/* called with rtnl_lock */
a2fbb9ea
ET
9674static int bnx2x_close(struct net_device *dev)
9675{
a2fbb9ea
ET
9676 struct bnx2x *bp = netdev_priv(dev);
9677
9678 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9679 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9680 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9681 if (!CHIP_REV_IS_SLOW(bp))
9682 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9683
9684 return 0;
9685}
9686
34f80b04
EG
9687/* called with netif_tx_lock from set_multicast */
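/* Rx filtering policy: promiscuous mode maps directly, IFF_ALLMULTI (or
 * more multicast addresses than the E1 CAM can hold) selects all-multi,
 * otherwise E1 programs one CAM entry per multicast address while E1H
 * hashes each address (crc32c, top 8 bits) into a 256-bit MC filter.
 */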
9688static void bnx2x_set_rx_mode(struct net_device *dev)
9689{
9690 struct bnx2x *bp = netdev_priv(dev);
9691 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9692 int port = BP_PORT(bp);
9693
9694 if (bp->state != BNX2X_STATE_OPEN) {
9695 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9696 return;
9697 }
9698
9699 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9700
9701 if (dev->flags & IFF_PROMISC)
9702 rx_mode = BNX2X_RX_MODE_PROMISC;
9703
9704 else if ((dev->flags & IFF_ALLMULTI) ||
9705 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9706 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9707
9708 else { /* some multicasts */
9709 if (CHIP_IS_E1(bp)) {
9710 int i, old, offset;
9711 struct dev_mc_list *mclist;
9712 struct mac_configuration_cmd *config =
9713 bnx2x_sp(bp, mcast_config);
9714
9715 for (i = 0, mclist = dev->mc_list;
9716 mclist && (i < dev->mc_count);
9717 i++, mclist = mclist->next) {
9718
9719 config->config_table[i].
9720 cam_entry.msb_mac_addr =
9721 swab16(*(u16 *)&mclist->dmi_addr[0]);
9722 config->config_table[i].
9723 cam_entry.middle_mac_addr =
9724 swab16(*(u16 *)&mclist->dmi_addr[2]);
9725 config->config_table[i].
9726 cam_entry.lsb_mac_addr =
9727 swab16(*(u16 *)&mclist->dmi_addr[4]);
9728 config->config_table[i].cam_entry.flags =
9729 cpu_to_le16(port);
9730 config->config_table[i].
9731 target_table_entry.flags = 0;
9732 config->config_table[i].
9733 target_table_entry.client_id = 0;
9734 config->config_table[i].
9735 target_table_entry.vlan_id = 0;
9736
9737 DP(NETIF_MSG_IFUP,
9738 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9739 config->config_table[i].
9740 cam_entry.msb_mac_addr,
9741 config->config_table[i].
9742 cam_entry.middle_mac_addr,
9743 config->config_table[i].
9744 cam_entry.lsb_mac_addr);
9745 }
9746 old = config->hdr.length_6b;
9747 if (old > i) {
9748 for (; i < old; i++) {
9749 if (CAM_IS_INVALID(config->
9750 config_table[i])) {
9751 i--; /* already invalidated */
9752 break;
9753 }
9754 /* invalidate */
9755 CAM_INVALIDATE(config->
9756 config_table[i]);
9757 }
9758 }
9759
9760 if (CHIP_REV_IS_SLOW(bp))
9761 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9762 else
9763 offset = BNX2X_MAX_MULTICAST*(1 + port);
9764
9765 config->hdr.length_6b = i;
9766 config->hdr.offset = offset;
9767 config->hdr.client_id = BP_CL_ID(bp);
9768 config->hdr.reserved1 = 0;
9769
9770 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9771 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9772 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9773 0);
9774 } else { /* E1H */
9775 /* Accept one or more multicasts */
9776 struct dev_mc_list *mclist;
9777 u32 mc_filter[MC_HASH_SIZE];
9778 u32 crc, bit, regidx;
9779 int i;
9780
9781 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9782
9783 for (i = 0, mclist = dev->mc_list;
9784 mclist && (i < dev->mc_count);
9785 i++, mclist = mclist->next) {
9786
9787 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9788 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9789 mclist->dmi_addr[0], mclist->dmi_addr[1],
9790 mclist->dmi_addr[2], mclist->dmi_addr[3],
9791 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9792
9793 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9794 bit = (crc >> 24) & 0xff;
9795 regidx = bit >> 5;
9796 bit &= 0x1f;
9797 mc_filter[regidx] |= (1 << bit);
9798 }
9799
9800 for (i = 0; i < MC_HASH_SIZE; i++)
9801 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9802 mc_filter[i]);
9803 }
9804 }
9805
9806 bp->rx_mode = rx_mode;
9807 bnx2x_set_storm_rx_mode(bp);
9808}
9809
9810/* called with rtnl_lock */
a2fbb9ea
ET
9811static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9812{
9813 struct sockaddr *addr = p;
9814 struct bnx2x *bp = netdev_priv(dev);
9815
34f80b04 9816 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9817 return -EINVAL;
9818
9819 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9820 if (netif_running(dev)) {
9821 if (CHIP_IS_E1(bp))
9822 bnx2x_set_mac_addr_e1(bp);
9823 else
9824 bnx2x_set_mac_addr_e1h(bp);
9825 }
a2fbb9ea
ET
9826
9827 return 0;
9828}
9829
c18487ee 9830/* called with rtnl_lock */
a2fbb9ea
ET
9831static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9832{
9833 struct mii_ioctl_data *data = if_mii(ifr);
9834 struct bnx2x *bp = netdev_priv(dev);
9835 int err;
9836
9837 switch (cmd) {
9838 case SIOCGMIIPHY:
34f80b04 9839 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9840
c14423fe 9841 /* fallthrough */
c18487ee 9842
a2fbb9ea 9843 case SIOCGMIIREG: {
c18487ee 9844 u16 mii_regval;
a2fbb9ea 9845
c18487ee
YR
9846 if (!netif_running(dev))
9847 return -EAGAIN;
a2fbb9ea 9848
34f80b04
EG
9849 mutex_lock(&bp->port.phy_mutex);
9850 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9851 DEFAULT_PHY_DEV_ADDR,
9852 (data->reg_num & 0x1f), &mii_regval);
9853 data->val_out = mii_regval;
34f80b04 9854 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9855 return err;
9856 }
9857
9858 case SIOCSMIIREG:
9859 if (!capable(CAP_NET_ADMIN))
9860 return -EPERM;
9861
c18487ee
YR
9862 if (!netif_running(dev))
9863 return -EAGAIN;
9864
34f80b04
EG
9865 mutex_lock(&bp->port.phy_mutex);
9866 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9867 DEFAULT_PHY_DEV_ADDR,
9868 (data->reg_num & 0x1f), data->val_in);
34f80b04 9869 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9870 return err;
9871
9872 default:
9873 /* do nothing */
9874 break;
9875 }
9876
9877 return -EOPNOTSUPP;
9878}
9879
34f80b04 9880/* called with rtnl_lock */
a2fbb9ea
ET
9881static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9882{
9883 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9884 int rc = 0;
a2fbb9ea
ET
9885
9886 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9887 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9888 return -EINVAL;
9889
9890 /* This does not race with packet allocation
c14423fe 9891 * because the actual alloc size is
a2fbb9ea
ET
9892 * only updated as part of load
9893 */
9894 dev->mtu = new_mtu;
9895
9896 if (netif_running(dev)) {
34f80b04
EG
9897 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9898 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9899 }
34f80b04
EG
9900
9901 return rc;
a2fbb9ea
ET
9902}
9903
9904static void bnx2x_tx_timeout(struct net_device *dev)
9905{
9906 struct bnx2x *bp = netdev_priv(dev);
9907
9908#ifdef BNX2X_STOP_ON_ERROR
9909 if (!bp->panic)
9910 bnx2x_panic();
9911#endif
9912 /* This allows the netif to be shutdown gracefully before resetting */
9913 schedule_work(&bp->reset_task);
9914}
9915
9916#ifdef BCM_VLAN
34f80b04 9917/* called with rtnl_lock */
a2fbb9ea
ET
9918static void bnx2x_vlan_rx_register(struct net_device *dev,
9919 struct vlan_group *vlgrp)
9920{
9921 struct bnx2x *bp = netdev_priv(dev);
9922
9923 bp->vlgrp = vlgrp;
9924 if (netif_running(dev))
49d66772 9925 bnx2x_set_client_config(bp);
a2fbb9ea 9926}
34f80b04 9927
a2fbb9ea
ET
9928#endif
9929
9930#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9931static void poll_bnx2x(struct net_device *dev)
9932{
9933 struct bnx2x *bp = netdev_priv(dev);
9934
9935 disable_irq(bp->pdev->irq);
9936 bnx2x_interrupt(bp->pdev->irq, dev);
9937 enable_irq(bp->pdev->irq);
9938}
9939#endif
9940
34f80b04
EG
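/* One-time PCI/netdev plumbing for a new function: enable the device,
 * claim its regions, map BAR0 as the register view and up to BNX2X_DB_SIZE
 * of BAR2 as doorbell space, pick 64/32-bit DMA masks, and wire up the
 * net_device operations and feature flags.
 */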
9941static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9942 struct net_device *dev)
a2fbb9ea
ET
9943{
9944 struct bnx2x *bp;
9945 int rc;
9946
9947 SET_NETDEV_DEV(dev, &pdev->dev);
9948 bp = netdev_priv(dev);
9949
34f80b04
EG
9950 bp->dev = dev;
9951 bp->pdev = pdev;
a2fbb9ea 9952 bp->flags = 0;
34f80b04 9953 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9954
9955 rc = pci_enable_device(pdev);
9956 if (rc) {
9957 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9958 goto err_out;
9959 }
9960
9961 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9962 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9963 " aborting\n");
9964 rc = -ENODEV;
9965 goto err_out_disable;
9966 }
9967
9968 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9969 printk(KERN_ERR PFX "Cannot find second PCI device"
9970 " base address, aborting\n");
9971 rc = -ENODEV;
9972 goto err_out_disable;
9973 }
9974
34f80b04
EG
9975 if (atomic_read(&pdev->enable_cnt) == 1) {
9976 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9977 if (rc) {
9978 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9979 " aborting\n");
9980 goto err_out_disable;
9981 }
a2fbb9ea 9982
34f80b04
EG
9983 pci_set_master(pdev);
9984 pci_save_state(pdev);
9985 }
a2fbb9ea
ET
9986
9987 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9988 if (bp->pm_cap == 0) {
9989 printk(KERN_ERR PFX "Cannot find power management"
9990 " capability, aborting\n");
9991 rc = -EIO;
9992 goto err_out_release;
9993 }
9994
9995 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9996 if (bp->pcie_cap == 0) {
9997 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9998 " aborting\n");
9999 rc = -EIO;
10000 goto err_out_release;
10001 }
10002
10003 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10004 bp->flags |= USING_DAC_FLAG;
10005 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10006 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10007 " failed, aborting\n");
10008 rc = -EIO;
10009 goto err_out_release;
10010 }
10011
10012 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10013 printk(KERN_ERR PFX "System does not support DMA,"
10014 " aborting\n");
10015 rc = -EIO;
10016 goto err_out_release;
10017 }
10018
34f80b04
EG
10019 dev->mem_start = pci_resource_start(pdev, 0);
10020 dev->base_addr = dev->mem_start;
10021 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10022
10023 dev->irq = pdev->irq;
10024
10025 bp->regview = ioremap_nocache(dev->base_addr,
10026 pci_resource_len(pdev, 0));
10027 if (!bp->regview) {
10028 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10029 rc = -ENOMEM;
10030 goto err_out_release;
10031 }
10032
34f80b04
EG
10033 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10034 min_t(u64, BNX2X_DB_SIZE,
10035 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10036 if (!bp->doorbells) {
10037 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10038 rc = -ENOMEM;
10039 goto err_out_unmap;
10040 }
10041
10042 bnx2x_set_power_state(bp, PCI_D0);
10043
34f80b04
EG
10044 /* clean indirect addresses */
10045 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10046 PCICFG_VENDOR_ID_OFFSET);
10047 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10048 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10049 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10050 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10051
34f80b04
EG
10052 dev->hard_start_xmit = bnx2x_start_xmit;
10053 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10054
34f80b04
EG
10055 dev->ethtool_ops = &bnx2x_ethtool_ops;
10056 dev->open = bnx2x_open;
10057 dev->stop = bnx2x_close;
10058 dev->set_multicast_list = bnx2x_set_rx_mode;
10059 dev->set_mac_address = bnx2x_change_mac_addr;
10060 dev->do_ioctl = bnx2x_ioctl;
10061 dev->change_mtu = bnx2x_change_mtu;
10062 dev->tx_timeout = bnx2x_tx_timeout;
10063#ifdef BCM_VLAN
10064 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10065#endif
10066#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10067 dev->poll_controller = poll_bnx2x;
10068#endif
10069 dev->features |= NETIF_F_SG;
10070 dev->features |= NETIF_F_HW_CSUM;
10071 if (bp->flags & USING_DAC_FLAG)
10072 dev->features |= NETIF_F_HIGHDMA;
10073#ifdef BCM_VLAN
10074 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10075#endif
10076 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10077 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10078
10079 return 0;
10080
10081err_out_unmap:
10082 if (bp->regview) {
10083 iounmap(bp->regview);
10084 bp->regview = NULL;
10085 }
a2fbb9ea
ET
10086 if (bp->doorbells) {
10087 iounmap(bp->doorbells);
10088 bp->doorbells = NULL;
10089 }
10090
10091err_out_release:
34f80b04
EG
10092 if (atomic_read(&pdev->enable_cnt) == 1)
10093 pci_release_regions(pdev);
a2fbb9ea
ET
10094
10095err_out_disable:
10096 pci_disable_device(pdev);
10097 pci_set_drvdata(pdev, NULL);
10098
10099err_out:
10100 return rc;
10101}
10102
25047950
ET
10103static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10104{
10105 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10106
10107 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10108 return val;
10109}
10110
10111/* return value of 1=2.5GHz 2=5GHz */
10112static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10113{
10114 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10115
10116 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10117 return val;
10118}
10119
a2fbb9ea
ET
10120static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10121 const struct pci_device_id *ent)
10122{
10123 static int version_printed;
10124 struct net_device *dev = NULL;
10125 struct bnx2x *bp;
25047950 10126 int rc;
25047950 10127 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10128
10129 if (version_printed++ == 0)
10130 printk(KERN_INFO "%s", version);
10131
10132 /* dev zeroed in init_etherdev */
10133 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10134 if (!dev) {
10135 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10136 return -ENOMEM;
34f80b04 10137 }
a2fbb9ea
ET
10138
10139 netif_carrier_off(dev);
10140
10141 bp = netdev_priv(dev);
10142 bp->msglevel = debug;
10143
34f80b04 10144 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10145 if (rc < 0) {
10146 free_netdev(dev);
10147 return rc;
10148 }
10149
a2fbb9ea
ET
10150 rc = register_netdev(dev);
10151 if (rc) {
c14423fe 10152 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10153 goto init_one_exit;
a2fbb9ea
ET
10154 }
10155
10156 pci_set_drvdata(pdev, dev);
10157
34f80b04
EG
10158 rc = bnx2x_init_bp(bp);
10159 if (rc) {
10160 unregister_netdev(dev);
10161 goto init_one_exit;
10162 }
10163
10164 bp->common.name = board_info[ent->driver_data].name;
25047950 10165 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10166 " IRQ %d, ", dev->name, bp->common.name,
10167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10168 bnx2x_get_pcie_width(bp),
10169 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10170 dev->base_addr, bp->pdev->irq);
10171 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10172 return 0;
34f80b04
EG
10173
10174init_one_exit:
10175 if (bp->regview)
10176 iounmap(bp->regview);
10177
10178 if (bp->doorbells)
10179 iounmap(bp->doorbells);
10180
10181 free_netdev(dev);
10182
10183 if (atomic_read(&pdev->enable_cnt) == 1)
10184 pci_release_regions(pdev);
10185
10186 pci_disable_device(pdev);
10187 pci_set_drvdata(pdev, NULL);
10188
10189 return rc;
a2fbb9ea
ET
10190}
10191
10192static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10193{
10194 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10195 struct bnx2x *bp;
10196
10197 if (!dev) {
228241eb
ET
10198 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10199 return;
10200 }
228241eb 10201 bp = netdev_priv(dev);
a2fbb9ea 10202
a2fbb9ea
ET
10203 unregister_netdev(dev);
10204
10205 if (bp->regview)
10206 iounmap(bp->regview);
10207
10208 if (bp->doorbells)
10209 iounmap(bp->doorbells);
10210
10211 free_netdev(dev);
34f80b04
EG
10212
10213 if (atomic_read(&pdev->enable_cnt) == 1)
10214 pci_release_regions(pdev);
10215
a2fbb9ea
ET
10216 pci_disable_device(pdev);
10217 pci_set_drvdata(pdev, NULL);
10218}
10219
10220static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10221{
10222 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10223 struct bnx2x *bp;
10224
34f80b04
EG
10225 if (!dev) {
10226 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10227 return -ENODEV;
10228 }
10229 bp = netdev_priv(dev);
a2fbb9ea 10230
34f80b04 10231 rtnl_lock();
a2fbb9ea 10232
34f80b04 10233 pci_save_state(pdev);
228241eb 10234
34f80b04
EG
10235 if (!netif_running(dev)) {
10236 rtnl_unlock();
10237 return 0;
10238 }
a2fbb9ea
ET
10239
10240 netif_device_detach(dev);
a2fbb9ea 10241
da5a662a 10242 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10243
a2fbb9ea 10244 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10245
34f80b04
EG
10246 rtnl_unlock();
10247
a2fbb9ea
ET
10248 return 0;
10249}
10250
10251static int bnx2x_resume(struct pci_dev *pdev)
10252{
10253 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10254 struct bnx2x *bp;
a2fbb9ea
ET
10255 int rc;
10256
228241eb
ET
10257 if (!dev) {
10258 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10259 return -ENODEV;
10260 }
228241eb 10261 bp = netdev_priv(dev);
a2fbb9ea 10262
34f80b04
EG
10263 rtnl_lock();
10264
228241eb 10265 pci_restore_state(pdev);
34f80b04
EG
10266
10267 if (!netif_running(dev)) {
10268 rtnl_unlock();
10269 return 0;
10270 }
10271
a2fbb9ea
ET
10272 bnx2x_set_power_state(bp, PCI_D0);
10273 netif_device_attach(dev);
10274
da5a662a 10275 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10276
34f80b04
EG
10277 rtnl_unlock();
10278
10279 return rc;
a2fbb9ea
ET
10280}
10281
493adb1f
WX
10282/**
10283 * bnx2x_io_error_detected - called when PCI error is detected
10284 * @pdev: Pointer to PCI device
10285 * @state: The current pci connection state
10286 *
10287 * This function is called after a PCI bus error affecting
10288 * this device has been detected.
10289 */
10290static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10291 pci_channel_state_t state)
10292{
10293 struct net_device *dev = pci_get_drvdata(pdev);
10294 struct bnx2x *bp = netdev_priv(dev);
10295
10296 rtnl_lock();
10297
10298 netif_device_detach(dev);
10299
10300 if (netif_running(dev))
10301 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10302
10303 pci_disable_device(pdev);
10304
10305 rtnl_unlock();
10306
10307 /* Request a slot reset */
10308 return PCI_ERS_RESULT_NEED_RESET;
10309}
10310
10311/**
10312 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10313 * @pdev: Pointer to PCI device
10314 *
10315 * Restart the card from scratch, as if from a cold-boot.
10316 */
10317static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10318{
10319 struct net_device *dev = pci_get_drvdata(pdev);
10320 struct bnx2x *bp = netdev_priv(dev);
10321
10322 rtnl_lock();
10323
10324 if (pci_enable_device(pdev)) {
10325 dev_err(&pdev->dev,
10326 "Cannot re-enable PCI device after reset\n");
10327 rtnl_unlock();
10328 return PCI_ERS_RESULT_DISCONNECT;
10329 }
10330
10331 pci_set_master(pdev);
10332 pci_restore_state(pdev);
10333
10334 if (netif_running(dev))
10335 bnx2x_set_power_state(bp, PCI_D0);
10336
10337 rtnl_unlock();
10338
10339 return PCI_ERS_RESULT_RECOVERED;
10340}
10341
10342/**
10343 * bnx2x_io_resume - called when traffic can start flowing again
10344 * @pdev: Pointer to PCI device
10345 *
10346 * This callback is called when the error recovery driver tells us that
10347 * its OK to resume normal operation.
10348 */
10349static void bnx2x_io_resume(struct pci_dev *pdev)
10350{
10351 struct net_device *dev = pci_get_drvdata(pdev);
10352 struct bnx2x *bp = netdev_priv(dev);
10353
10354 rtnl_lock();
10355
10356 if (netif_running(dev))
10357 bnx2x_nic_load(bp, LOAD_OPEN);
10358
10359 netif_device_attach(dev);
10360
10361 rtnl_unlock();
10362}
10363
10364static struct pci_error_handlers bnx2x_err_handler = {
10365 .error_detected = bnx2x_io_error_detected,
10366 .slot_reset = bnx2x_io_slot_reset,
10367 .resume = bnx2x_io_resume,
10368};
10369
a2fbb9ea 10370static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10371 .name = DRV_MODULE_NAME,
10372 .id_table = bnx2x_pci_tbl,
10373 .probe = bnx2x_init_one,
10374 .remove = __devexit_p(bnx2x_remove_one),
10375 .suspend = bnx2x_suspend,
10376 .resume = bnx2x_resume,
10377 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10378};
10379
10380static int __init bnx2x_init(void)
10381{
10382 return pci_register_driver(&bnx2x_pci_driver);
10383}
10384
10385static void __exit bnx2x_cleanup(void)
10386{
10387 pci_unregister_driver(&bnx2x_pci_driver);
10388}
10389
10390module_init(bnx2x_init);
10391module_exit(bnx2x_cleanup);
10392