/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

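/* Note on the two helpers above: indirect access tunnels through PCI
 * config space - the GRC address is latched via PCICFG_GRC_ADDRESS, the
 * data moves through PCICFG_GRC_DATA, and the window is then pointed
 * back at PCICFG_VENDOR_ID_OFFSET, presumably so a stray config cycle
 * cannot hit a live register.
 */
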
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
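
/* Completion handshake for the DMAE copy above: the engine writes
 * DMAE_COMP_VAL into the slowpath wb_comp word when the transfer is
 * done, so the caller simply polls that word (cnt = 200 tries) instead
 * of waiting for an interrupt.
 */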

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
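
/* bnx2x_wb_wr()/bnx2x_wb_rd() move 64-bit "wide bus" registers as two
 * u32 halves (index 0 = high word, index 1 = low word) through the DMAE
 * helpers above.
 */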

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
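
/* bnx2x_mc_assert() returns the number of valid assert-list rows found
 * across the four storm processors; a non-zero return means at least
 * one storm firmware assertion has fired.
 */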

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
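
/* The MCP scratchpad trace is circular: "mark" points into the buffer,
 * so the dump above prints from mark to the end of the region (0xF900)
 * and then wraps from 0xF108 back up to mark, eight words at a time
 * with data[8] acting as the NUL terminator.
 */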

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
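
/* In the INT#A path above the HC config is deliberately written twice:
 * first with the MSI/MSI-X enable bit still set, then again with that
 * bit cleared - presumably an ordering requirement of the HC when
 * falling back from MSI-X to INT#A.
 */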

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
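
/* The return value of bnx2x_update_fpsb_idx() is a bitmask: bit 0 means
 * the CSTORM status-block index advanced, bit 1 means the USTORM index
 * advanced; 0 means there is no new work.
 */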

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
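
/* bnx2x_tx_avail() counts the NUM_TX_RINGS "next page" BDs as always in
 * use, so the value it returns is a conservative lower bound on the BDs
 * a new frame may consume.
 */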

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
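
/* SGE mask bookkeeping: one bit tracks each SGE ring entry.  The mask
 * starts as all 1-s, bits are cleared as the FW reports pages consumed
 * (see bnx2x_update_sge_prod() above), and the two bits covering each
 * page's "next" element stay cleared since the FW never indicates them.
 */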

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
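
/* Each sgl[] entry in the CQE covers PAGES_PER_SGE pages, so the loop
 * above walks the SGL in page-aligned chunks: it swaps a fresh page into
 * the ring via bnx2x_alloc_rx_sge() and attaches the old page to the skb
 * as a fragment, dropping the whole packet if a replacement page cannot
 * be allocated.
 */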

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
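
/* The bd, cqe and sge producers are handed to the FW by writing the
 * tstorm_eth_rx_producers struct into TSTORM internal memory one u32 at
 * a time, rather than through a doorbell register.
 */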

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
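
/* HW lock semantics above: writing the resource bit to the "set"
 * register (hw_lock_control_reg + 4) requests the lock, and ownership
 * is confirmed only when the bit reads back as set in the control
 * register; the loop retries every 5 ms and gives up after 1 second.
 */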

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if the swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
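
/*
 * Worked example (illustrative only): with two visible vNICs on a port
 * whose min BW config fields are 3 and 0, the per-vn rates become
 * 3 * 100 = 300 and DEF_MIN_RATE (the zero entry is bumped so the
 * fairness algorithm has a non-zero weight), giving
 * wsum = 300 + DEF_MIN_RATE.  Only when every visible vn has min BW 0
 * does the function return 0 and fairness gets disabled.
 */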

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " - fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will
		   occur.  The 1.25 coefficient makes the threshold a little
		   bigger than the real time, to compensate for timer
		   inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode - minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
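
/*
 * Numbers for a 10G port (illustrative, derived from the code above):
 * port_rate = 10000 Mbps, so r_param = 10000 / 8 = 1250 bytes/usec.
 * Taking the "100 usec" from the comment above as the value of
 * RS_PERIODIC_TIMEOUT_USEC, rs_periodic_timeout = 100 / 4 = 25 ticks and
 * rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, i.e. 1.25 periods'
 * worth of line-rate traffic.
 */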

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      ((double)m_rs_vn.
			       protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn shares the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol shares the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
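
/*
 * Quota arithmetic, spelled out (illustrative): a vn capped at
 * vn_max_rate = 2500 Mbps with a 100 usec rate-shaping period gets
 * quota = (2500 * 100) / 8 = 31250 bytes per period - since Mbps equals
 * bits/usec, Mbps * usec / 8 conveniently cancels to plain bytes.
 */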

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}
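
/*
 * Note on the (int)wsum argument above: it doubles as the en_fness flag
 * of bnx2x_init_port_minmax() - a zero weight sum (all min rates zero,
 * see bnx2x_calc_vn_wsum()) disables fairness while rate shaping stays on.
 */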

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
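
/*
 * Illustrative caller (not new driver logic): bnx2x_storm_stats_post()
 * further down in this file posts the statistics-query ramrod exactly
 * this way, splitting a struct into the hi/lo data words:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */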

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1UL << 31))
			break;

		msleep(5);
	}
	if (!(val & (1UL << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
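
/*
 * Returned bitmask, for reference (derived from the code above):
 * bit 0 - attention bits index changed, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task() below keys off
 * bits 0 and 1 of this status.
 */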

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
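
/*
 * Bit logic above, traced on one attention bit (illustrative): a bit is
 * "asserted" when the chip raises it (attn_bits = 1) while it is neither
 * acked (attn_ack = 0) nor already tracked (attn_state = 0); it is
 * "deasserted" when the chip has dropped it (attn_bits = 0) while it is
 * still acked and tracked (both 1).  A bit where attn_bits and attn_ack
 * agree but attn_state disagrees is flagged as a BAD attention state.
 */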

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		/* parentheses are required: '+' binds tighter than '?:' */ \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
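
/*
 * Worked example for the split-64-bit helpers (illustrative): with
 * s = 0x00000001:0xfffffffe and a = 0x00000000:0x00000005, ADD_64 gives
 * s_lo = 0x3 (wrapped), and since s_lo < a_lo a carry of 1 bumps s_hi
 * to 0x2.  DIFF_64 undoes the same wrap on subtraction by 'loaning' 1
 * from d_hi and adding (UINT_MAX - s_lo) + 1 to the low word, clamping
 * the result to 0:0 when the subtrahend is actually larger.
 */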

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		msleep(1);
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}
3476
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

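/* Each storm stamps its statistics block with a rolling counter; the
 * snapshot is considered fresh only if that counter is exactly one
 * behind the driver's own stats_counter, i.e. it matches the query the
 * driver last posted. Stale snapshots are rejected rather than merged.
 */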
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   " tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   " xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

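/* Fold the accumulated 64-bit hi/lo counters into the kernel's
 * net_device_stats. The bnx2x_hilo() helper (defined earlier in this
 * file) combines a {hi, lo} pair of u32s into a single unsigned long.
 */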
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

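/* Runs once per timer interval: bail out if the previous DMAE batch
 * has not yet written its completion sentinel, merge the HW and storm
 * snapshots, and panic if the storms fail to answer three consecutive
 * queries.
 */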
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %u  no_buff_discard %u "
			"mac_discard %u  mac_filter_discard %u "
			"xxoverflow_discard %u  brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

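/* On stats stop, flush the final port and function snapshots out to
 * shared memory. When both blocks exist the two DMAE commands are
 * chained: the first one completes by kicking the loader channel, and
 * only the last one writes the host completion sentinel.
 */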
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

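/* The statistics engine is a small state machine: two states
 * (DISABLED/ENABLED) by four events (PMF, LINK_UP, UPDATE, STOP).
 * For example, STOP while ENABLED runs bnx2x_stats_stop() and moves
 * the machine to DISABLED, while UPDATE while DISABLED is ignored.
 */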
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

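/* Periodic driver timer: besides kicking statistics, it maintains the
 * driver/MCP heartbeat. The driver increments drv_pulse in shared
 * memory and expects the management CPU's mcp_pulse to trail it by at
 * most one; e.g. drv_pulse 0x005 is healthy against mcp_pulse 0x005 or
 * 0x004, anything else means a lost heartbeat.
 */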
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_def_status_block)/4);
}

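/* Each fastpath status block has a USTORM section (Rx indices) and a
 * CSTORM section (Tx indices). For both, the host DMA address of the
 * block is programmed into the storm's internal memory, and host
 * coalescing is initially disabled on every index (the HC_DISABLE
 * writes below) until bnx2x_update_coalesce() configures it.
 */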
static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
			  struct host_status_block *sb, dma_addr_t mapping)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->def_att_idx = 0;
	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	bp->def_u_idx = 0;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	bp->def_c_idx = 0;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	bp->def_t_idx = 0;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	bp->def_x_idx = 0;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

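/* Program the Rx/Tx host-coalescing parameters for every queue. The
 * timeout fields appear to be written in hardware units of 12 us,
 * hence the division of the microsecond tick values by 12; a tick
 * value of 0 leaves host coalescing disabled on that index.
 */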
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

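/* The Rx rings are chained page lists: the last slots of every BD/SGE
 * page (hence the "RX_DESC_CNT * i - 2" / "RX_SGE_CNT * i - 2"
 * indexing) and the last CQE of every completion page
 * ("RCQ_DESC_CNT * i - 1") hold "next page" pointers rather than
 * ordinary descriptors, so the rings wrap from page to page in
 * hardware.
 */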
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod, cqe_ring_prod = 0;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
				struct bnx2x_fastpath *fp = &bp->fp[j];

				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp,
					      ETH_MAX_AGGREGATION_QUEUES_E1H);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

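/* The slowpath queue (SPQ) carries ramrod commands to the firmware.
 * Its ring lives in host memory; here the base address and the initial
 * producer index are handed to the XSTORM so it can fetch entries as
 * the driver posts them.
 */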
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

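/* RSS indirection: each entry of the TSTORM indirection table maps one
 * hash bucket to a queue, assigned round-robin (i % num_queues), so
 * flows spread evenly across the enabled Rx queues.
 */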
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

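/* The Rx filtering mode is expressed to the TSTORM as per-function
 * accept/drop masks: NONE drops everything, NORMAL accepts only
 * broadcast outright (unicast/multicast still go through the regular
 * MAC and MC-list filters), ALLMULTI additionally accepts all
 * multicast, and PROMISC accepts everything.
 */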
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

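/* The load_code returned by the MCP tells this function how much of
 * the init it owns: the first function on the chip does COMMON + PORT
 * + FUNCTION, the first on a port does PORT + FUNCTION, and any other
 * does FUNCTION only. The switch below therefore falls through
 * deliberately from case to case.
 */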
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

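/* A gzip stream (RFC 1952) starts with a 10-byte fixed header: the
 * magic bytes 0x1f 0x8b, the compression method (8 = deflate), a flag
 * byte and six more fields. If the FNAME flag (bit 3) is set, a
 * NUL-terminated file name follows the fixed header and must be
 * skipped before handing the raw deflate data to zlib (inflateInit2
 * with -MAX_WBITS selects raw deflate, i.e. no zlib header).
 */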
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

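/* Each debug loopback packet is counted by the NIG as 0x10 bytes, so
 * one packet should show 0x10 in NIG_REG_STAT2_BRB_OCTET and the
 * 10 + 1 packets of the second phase should show 11 * 0x10 = 0xb0;
 * that is what the polling loops below wait for.
 */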
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

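/* Unmask attention interrupts: in these INT_MASK registers a set bit
 * means the attention is masked, so writing 0 enables all of a block's
 * attention bits. The commented-out writes are blocks deliberately
 * left masked.
 */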
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

34f80b04
EG
5103
5104static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5105{
a2fbb9ea 5106 u32 val, i;
a2fbb9ea 5107
34f80b04 5108 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5109
34f80b04
EG
5110 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5111 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5112
34f80b04
EG
5113 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5114 if (CHIP_IS_E1H(bp))
5115 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5116
34f80b04
EG
5117 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5118 msleep(30);
5119 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5120
34f80b04
EG
5121 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5122 if (CHIP_IS_E1(bp)) {
5123 /* enable HW interrupt from PXP on USDM overflow
5124 bit 16 on INT_MASK_0 */
5125 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5126 }
a2fbb9ea 5127
34f80b04
EG
5128 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5129 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5130
5131#ifdef __BIG_ENDIAN
34f80b04
EG
5132 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5133 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5134 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5138
5139/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5140 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5141 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5142 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5143 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5144#endif
5145
5146#ifndef BCM_ISCSI
5147 /* set NIC mode */
5148 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5149#endif
5150
34f80b04 5151 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5152#ifdef BCM_ISCSI
34f80b04
EG
5153 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5154 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5155 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5156#endif
5157
34f80b04
EG
5158 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5159 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5160
34f80b04
EG
5161 /* let the HW do its magic ... */
5162 msleep(100);
5163 /* finish PXP init */
5164 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5165 if (val != 1) {
5166 BNX2X_ERR("PXP2 CFG failed\n");
5167 return -EBUSY;
5168 }
5169 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5170 if (val != 1) {
5171 BNX2X_ERR("PXP2 RD_INIT failed\n");
5172 return -EBUSY;
5173 }
a2fbb9ea 5174
34f80b04
EG
5175 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5176 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5177
34f80b04 5178 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5179
34f80b04
EG
5180 /* clean the DMAE memory */
5181 bp->dmae_ready = 1;
5182 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5183
34f80b04
EG
5184 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5185 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5186 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5187 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5188
34f80b04
EG
5189 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5190 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5191 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5192 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5193
5194 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5195 /* soft reset pulse */
5196 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5197 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5198
5199#ifdef BCM_ISCSI
34f80b04 5200 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5201#endif
a2fbb9ea 5202
34f80b04
EG
5203 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5204 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5205 if (!CHIP_REV_IS_SLOW(bp)) {
5206 /* enable hw interrupt from doorbell Q */
5207 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5208 }
a2fbb9ea 5209
34f80b04
EG
5210 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5211 if (CHIP_REV_IS_SLOW(bp)) {
5212 /* fix for emulation and FPGA for no pause */
5213 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5214 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5215 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5216 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5217 }
a2fbb9ea 5218
34f80b04
EG
5219 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5220 if (CHIP_IS_E1H(bp))
5221 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5222
34f80b04
EG
5223 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5224 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5225 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5226 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5227
34f80b04
EG
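 /* zero the four STORM internal memories; on E1H the region is filled
 in two halves (presumably to keep each fill within one init operation) */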
5228 if (CHIP_IS_E1H(bp)) {
5229 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5230 STORM_INTMEM_SIZE_E1H/2);
5231 bnx2x_init_fill(bp,
5232 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233 0, STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5235 STORM_INTMEM_SIZE_E1H/2);
5236 bnx2x_init_fill(bp,
5237 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5238 0, STORM_INTMEM_SIZE_E1H/2);
5239 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1H/2);
5241 bnx2x_init_fill(bp,
5242 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5243 0, STORM_INTMEM_SIZE_E1H/2);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1H/2);
5246 bnx2x_init_fill(bp,
5247 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5248 0, STORM_INTMEM_SIZE_E1H/2);
5249 } else { /* E1 */
ad8d3948
EG
5250 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
5252 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5253 STORM_INTMEM_SIZE_E1);
5254 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1);
5256 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5257 STORM_INTMEM_SIZE_E1);
34f80b04 5258 }
a2fbb9ea 5259
34f80b04
EG
5260 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5261 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5262 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5263 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5264
34f80b04
EG
5265 /* sync semi rtc */
5266 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5267 0x80000000);
5268 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5269 0x80000000);
a2fbb9ea 5270
34f80b04
EG
5271 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5272 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5273 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5274
34f80b04
EG
5275 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5276 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5277 REG_WR(bp, i, 0xc0cac01a);
5278 /* TODO: replace with something meaningful */
5279 }
5280 if (CHIP_IS_E1H(bp))
5281 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5282 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5283
34f80b04
EG
5284 if (sizeof(union cdu_context) != 1024)
5285 /* we currently assume that a context is 1024 bytes */
5286 printk(KERN_ALERT PFX "please adjust the size of"
5287 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5288
34f80b04
EG
5289 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5290 val = (4 << 24) + (0 << 12) + 1024;
5291 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5292 if (CHIP_IS_E1(bp)) {
5293 /* !!! fix pxp client credit until excel update */
5294 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5295 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5296 }
a2fbb9ea 5297
34f80b04
EG
5298 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5299 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5300
34f80b04
EG
5301 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5302 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5303
34f80b04
EG
5304 /* PXPCS COMMON comes here */
5305 /* Reset PCIE errors for debug */
5306 REG_WR(bp, 0x2814, 0xffffffff);
5307 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5308
34f80b04
EG
5309 /* EMAC0 COMMON comes here */
5310 /* EMAC1 COMMON comes here */
5311 /* DBU COMMON comes here */
5312 /* DBG COMMON comes here */
5313
5314 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5315 if (CHIP_IS_E1H(bp)) {
5316 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5317 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5318 }
5319
5320 if (CHIP_REV_IS_SLOW(bp))
5321 msleep(200);
5322
5323 /* finish CFC init */
5324 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5325 if (val != 1) {
5326 BNX2X_ERR("CFC LL_INIT failed\n");
5327 return -EBUSY;
5328 }
5329 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5330 if (val != 1) {
5331 BNX2X_ERR("CFC AC_INIT failed\n");
5332 return -EBUSY;
5333 }
5334 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5335 if (val != 1) {
5336 BNX2X_ERR("CFC CAM_INIT failed\n");
5337 return -EBUSY;
5338 }
5339 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5340
34f80b04
EG
5341 /* read NIG statistic
5342 to see if this is our first bring-up since power-up */
5343 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5344 val = *bnx2x_sp(bp, wb_data[0]);
5345
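 /* a zero octet count means no traffic has passed the BRB since
 power-up, i.e. this is the first load */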
5346 /* do internal memory self test */
5347 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5348 BNX2X_ERR("internal mem self test failed\n");
5349 return -EBUSY;
5350 }
5351
5352 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5354 /* Fan failure is indicated by SPIO 5 */
5355 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5356 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5357
5358 /* set to active low mode */
5359 val = REG_RD(bp, MISC_REG_SPIO_INT);
5360 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5361 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5362 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5363
34f80b04
EG
5364 /* enable interrupt to signal the IGU */
5365 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5366 val |= (1 << MISC_REGISTERS_SPIO_5);
5367 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5368 break;
f1410647 5369
34f80b04
EG
5370 default:
5371 break;
5372 }
f1410647 5373
34f80b04
EG
5374 /* clear PXP2 attentions */
5375 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5376
34f80b04 5377 enable_blocks_attention(bp);
a2fbb9ea 5378
7a9b2557
VZ
5379 if (bp->flags & TPA_ENABLE_FLAG) {
5380 struct tstorm_eth_tpa_exist tmp = {0};
5381
5382 tmp.tpa_exist = 1;
5383
5384 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5385 ((u32 *)&tmp)[0]);
5386 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5387 ((u32 *)&tmp)[1]);
5388 }
5389
34f80b04
EG
5390 return 0;
5391}
a2fbb9ea 5392
34f80b04
EG
5393static int bnx2x_init_port(struct bnx2x *bp)
5394{
5395 int port = BP_PORT(bp);
5396 u32 val;
a2fbb9ea 5397
34f80b04
EG
5398 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5399
5400 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5401
5402 /* Port PXP comes here */
5403 /* Port PXP2 comes here */
a2fbb9ea
ET
5404#ifdef BCM_ISCSI
5405 /* Port0 1
5406 * Port1 385 */
5407 i++;
5408 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5409 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5410 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5411 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5412
5413 /* Port0 2
5414 * Port1 386 */
5415 i++;
5416 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5417 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5418 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5419 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5420
5421 /* Port0 3
5422 * Port1 387 */
5423 i++;
5424 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5425 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5426 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5427 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5428#endif
34f80b04 5429 /* Port CMs come here */
a2fbb9ea
ET
5430
5431 /* Port QM comes here */
a2fbb9ea
ET
5432#ifdef BCM_ISCSI
5433 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5434 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5435
5436 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5437 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5438#endif
5439 /* Port DQ comes here */
5440 /* Port BRB1 comes here */
ad8d3948 5441 /* Port PRS comes here */
a2fbb9ea
ET
5442 /* Port TSDM comes here */
5443 /* Port CSDM comes here */
5444 /* Port USDM comes here */
5445 /* Port XSDM comes here */
34f80b04
EG
5446 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5447 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5448 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5449 port ? USEM_PORT1_END : USEM_PORT0_END);
5450 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5451 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5452 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5453 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5454 /* Port UPB comes here */
34f80b04
EG
5455 /* Port XPB comes here */
5456
5457 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5458 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5459
5460 /* configure PBF to work without PAUSE, MTU 9000 */
34f80b04 5461 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5462
5463 /* update threshold */
34f80b04 5464 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5465 /* update init credit */
34f80b04 5466 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
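 /* credit is counted in 16-byte units: 9040/16 = 565 lines for one
 jumbo frame, plus 553 - 22 (values presumably tuned for the
 internal pipeline) */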
a2fbb9ea
ET
5467
5468 /* probe changes */
34f80b04 5469 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5470 msleep(5);
34f80b04 5471 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5472
5473#ifdef BCM_ISCSI
5474 /* tell the searcher where the T2 table is */
5475 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5476
5477 wb_write[0] = U64_LO(bp->t2_mapping);
5478 wb_write[1] = U64_HI(bp->t2_mapping);
5479 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5480 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5481 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5482 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5483
5484 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5485 /* Port SRCH comes here */
5486#endif
5487 /* Port CDU comes here */
5488 /* Port CFC comes here */
34f80b04
EG
5489
5490 if (CHIP_IS_E1(bp)) {
5491 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5492 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5493 }
5494 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5495 port ? HC_PORT1_END : HC_PORT0_END);
5496
5497 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5498 MISC_AEU_PORT0_START,
34f80b04
EG
5499 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5500 /* init aeu_mask_attn_func_0/1:
5501 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5502 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5503 * bits 4-7 are used for "per vn group attention" */
5504 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5505 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5506
a2fbb9ea
ET
5507 /* Port PXPCS comes here */
5508 /* Port EMAC0 comes here */
5509 /* Port EMAC1 comes here */
5510 /* Port DBU comes here */
5511 /* Port DBG comes here */
34f80b04
EG
5512 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5513 port ? NIG_PORT1_END : NIG_PORT0_END);
5514
5515 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5516
5517 if (CHIP_IS_E1H(bp)) {
5518 u32 wsum;
5519 struct cmng_struct_per_port m_cmng_port;
5520 int vn;
5521
5522 /* 0x2 disable e1hov, 0x1 enable */
5523 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5524 (IS_E1HMF(bp) ? 0x1 : 0x2));
5525
5526 /* Init RATE SHAPING and FAIRNESS contexts.
5527 Initialize as if there is a 10G link. */
5528 wsum = bnx2x_calc_vn_wsum(bp);
5529 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5530 if (IS_E1HMF(bp))
5531 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5532 bnx2x_init_vn_minmax(bp, 2*vn + port,
5533 wsum, 10000, &m_cmng_port);
5534 }
5535
a2fbb9ea
ET
5536 /* Port MCP comes here */
5537 /* Port DMAE comes here */
5538
34f80b04 5539 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
f1410647
ET
5540 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5541 /* add SPIO 5 to group 0 */
5542 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5543 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5544 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5545 break;
5546
5547 default:
5548 break;
5549 }
5550
c18487ee 5551 bnx2x__link_reset(bp);
a2fbb9ea 5552
34f80b04
EG
5553 return 0;
5554}
5555
5556#define ILT_PER_FUNC (768/2)
5557#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5558 /* the physical address is shifted right by 12 bits and a
5559 valid bit (1) is set at bit 52 (the 53rd bit);
5560 since this is a wide (64 bit) register
5561 we split it into two 32 bit writes
5562 */
5563#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5564#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5565#define PXP_ONE_ILT(x) (((x) << 10) | x)
5566#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5567
5568#define CNIC_ILT_LINES 0
5569
5570static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5571{
5572 int reg;
5573
5574 if (CHIP_IS_E1H(bp))
5575 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5576 else /* E1 */
5577 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5578
5579 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5580}
5581
5582static int bnx2x_init_func(struct bnx2x *bp)
5583{
5584 int port = BP_PORT(bp);
5585 int func = BP_FUNC(bp);
5586 int i;
5587
5588 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5589
5590 i = FUNC_ILT_BASE(func);
5591
5592 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5593 if (CHIP_IS_E1H(bp)) {
5594 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5595 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5596 } else /* E1 */
5597 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5598 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5599
5600
5601 if (CHIP_IS_E1H(bp)) {
5602 for (i = 0; i < 9; i++)
5603 bnx2x_init_block(bp,
5604 cm_start[func][i], cm_end[func][i]);
5605
5606 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5607 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5608 }
5609
5610 /* HC init per function */
5611 if (CHIP_IS_E1H(bp)) {
5612 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5613
5614 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5615 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5616 }
5617 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5618
5619 if (CHIP_IS_E1H(bp))
5620 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5621
c14423fe 5622 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5623 REG_WR(bp, 0x2114, 0xffffffff);
5624 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5625
34f80b04
EG
5626 return 0;
5627}
5628
5629static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5630{
5631 int i, rc = 0;
a2fbb9ea 5632
34f80b04
EG
5633 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5634 BP_FUNC(bp), load_code);
a2fbb9ea 5635
34f80b04
EG
5636 bp->dmae_ready = 0;
5637 mutex_init(&bp->dmae_mutex);
5638 bnx2x_gunzip_init(bp);
a2fbb9ea 5639
34f80b04
EG
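 /* the cases below fall through on purpose: a COMMON load also
 performs the PORT and FUNCTION stages */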
5640 switch (load_code) {
5641 case FW_MSG_CODE_DRV_LOAD_COMMON:
5642 rc = bnx2x_init_common(bp);
5643 if (rc)
5644 goto init_hw_err;
5645 /* no break */
5646
5647 case FW_MSG_CODE_DRV_LOAD_PORT:
5648 bp->dmae_ready = 1;
5649 rc = bnx2x_init_port(bp);
5650 if (rc)
5651 goto init_hw_err;
5652 /* no break */
5653
5654 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5655 bp->dmae_ready = 1;
5656 rc = bnx2x_init_func(bp);
5657 if (rc)
5658 goto init_hw_err;
5659 break;
5660
5661 default:
5662 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5663 break;
5664 }
5665
5666 if (!BP_NOMCP(bp)) {
5667 int func = BP_FUNC(bp);
a2fbb9ea
ET
5668
5669 bp->fw_drv_pulse_wr_seq =
34f80b04 5670 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5671 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5672 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5673 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5674 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5675 } else
5676 bp->func_stx = 0;
a2fbb9ea 5677
34f80b04
EG
5678 /* this needs to be done before gunzip end */
5679 bnx2x_zero_def_sb(bp);
5680 for_each_queue(bp, i)
5681 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5682
5683init_hw_err:
5684 bnx2x_gunzip_end(bp);
5685
5686 return rc;
a2fbb9ea
ET
5687}
5688
c14423fe 5689/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5690static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5691{
34f80b04 5692 int func = BP_FUNC(bp);
f1410647
ET
5693 u32 seq = ++bp->fw_seq;
5694 u32 rc = 0;
19680c48
EG
5695 u32 cnt = 1;
5696 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5697
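 /* post the command tagged with a fresh sequence number; the FW
 acknowledges by echoing the sequence in fw_mb_header */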
34f80b04 5698 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5699 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5700
19680c48
EG
5701 do {
5702 /* let the FW do its magic ... */
5703 msleep(delay);
a2fbb9ea 5704
19680c48 5705 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5706
19680c48
EG
5707 /* Give the FW up to 2 seconds (200 * 10ms) */
5708 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5709
5710 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5711 cnt*delay, rc, seq);
a2fbb9ea
ET
5712
5713 /* is this a reply to our command? */
5714 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5715 rc &= FW_MSG_CODE_MASK;
f1410647 5716
a2fbb9ea
ET
5717 } else {
5718 /* FW BUG! */
5719 BNX2X_ERR("FW failed to respond!\n");
5720 bnx2x_fw_dump(bp);
5721 rc = 0;
5722 }
f1410647 5723
a2fbb9ea
ET
5724 return rc;
5725}
5726
5727static void bnx2x_free_mem(struct bnx2x *bp)
5728{
5729
5730#define BNX2X_PCI_FREE(x, y, size) \
5731 do { \
5732 if (x) { \
5733 pci_free_consistent(bp->pdev, size, x, y); \
5734 x = NULL; \
5735 y = 0; \
5736 } \
5737 } while (0)
5738
5739#define BNX2X_FREE(x) \
5740 do { \
5741 if (x) { \
5742 vfree(x); \
5743 x = NULL; \
5744 } \
5745 } while (0)
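 /* both helpers check for NULL and clear the pointer after freeing,
 so bnx2x_free_mem() is safe on a partially allocated bp */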
5746
5747 int i;
5748
5749 /* fastpath */
5750 for_each_queue(bp, i) {
5751
5752 /* Status blocks */
5753 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5754 bnx2x_fp(bp, i, status_blk_mapping),
5755 sizeof(struct host_status_block) +
5756 sizeof(struct eth_tx_db_data));
5757
5758 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5759 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5760 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5761 bnx2x_fp(bp, i, tx_desc_mapping),
5762 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5763
5764 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5765 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5766 bnx2x_fp(bp, i, rx_desc_mapping),
5767 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5768
5769 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5770 bnx2x_fp(bp, i, rx_comp_mapping),
5771 sizeof(struct eth_fast_path_rx_cqe) *
5772 NUM_RCQ_BD);
a2fbb9ea 5773
7a9b2557
VZ
5774 /* SGE ring */
5775 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5776 bnx2x_fp(bp, i, rx_sge_mapping),
5777 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5778 }
a2fbb9ea
ET
5779 /* end of fastpath */
5780
5781 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5782 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5783
5784 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5785 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5786
5787#ifdef BCM_ISCSI
5788 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5789 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5790 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5791 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5792#endif
7a9b2557 5793 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5794
5795#undef BNX2X_PCI_FREE
5796#undef BNX2X_FREE
5797}
5798
5799static int bnx2x_alloc_mem(struct bnx2x *bp)
5800{
5801
5802#define BNX2X_PCI_ALLOC(x, y, size) \
5803 do { \
5804 x = pci_alloc_consistent(bp->pdev, size, y); \
5805 if (x == NULL) \
5806 goto alloc_mem_err; \
5807 memset(x, 0, size); \
5808 } while (0)
5809
5810#define BNX2X_ALLOC(x, size) \
5811 do { \
5812 x = vmalloc(size); \
5813 if (x == NULL) \
5814 goto alloc_mem_err; \
5815 memset(x, 0, size); \
5816 } while (0)
5817
5818 int i;
5819
5820 /* fastpath */
a2fbb9ea
ET
5821 for_each_queue(bp, i) {
5822 bnx2x_fp(bp, i, bp) = bp;
5823
5824 /* Status blocks */
5825 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5826 &bnx2x_fp(bp, i, status_blk_mapping),
5827 sizeof(struct host_status_block) +
5828 sizeof(struct eth_tx_db_data));
5829
5830 bnx2x_fp(bp, i, hw_tx_prods) =
5831 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5832
5833 bnx2x_fp(bp, i, tx_prods_mapping) =
5834 bnx2x_fp(bp, i, status_blk_mapping) +
5835 sizeof(struct host_status_block);
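 /* the Tx producers block shares the status block DMA allocation:
 it starts right after the host_status_block, both in the virtual
 and in the physical address space */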
5836
5837 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5838 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5839 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5841 &bnx2x_fp(bp, i, tx_desc_mapping),
5842 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5843
5844 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5845 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5846 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5847 &bnx2x_fp(bp, i, rx_desc_mapping),
5848 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5849
5850 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5851 &bnx2x_fp(bp, i, rx_comp_mapping),
5852 sizeof(struct eth_fast_path_rx_cqe) *
5853 NUM_RCQ_BD);
5854
7a9b2557
VZ
5855 /* SGE ring */
5856 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5857 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5858 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5859 &bnx2x_fp(bp, i, rx_sge_mapping),
5860 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea
ET
5861 }
5862 /* end of fastpath */
5863
5864 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5865 sizeof(struct host_def_status_block));
5866
5867 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5868 sizeof(struct bnx2x_slowpath));
5869
5870#ifdef BCM_ISCSI
5871 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5872
5873 /* Initialize T1 */
5874 for (i = 0; i < 64*1024; i += 64) {
5875 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5876 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5877 }
5878
5879 /* allocate searcher T2 table
5880 we allocate 1/4 of alloc num for T2
5881 (which is not entered into the ILT) */
5882 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5883
5884 /* Initialize T2 */
5885 for (i = 0; i < 16*1024; i += 64)
5886 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5887
c14423fe 5888 /* now fix up the last line in the block to point back to the start of the table */
a2fbb9ea
ET
5889 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5890
5891 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5892 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5893
5894 /* QM queues (128*MAX_CONN) */
5895 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5896#endif
5897
5898 /* Slow path ring */
5899 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5900
5901 return 0;
5902
5903alloc_mem_err:
5904 bnx2x_free_mem(bp);
5905 return -ENOMEM;
5906
5907#undef BNX2X_PCI_ALLOC
5908#undef BNX2X_ALLOC
5909}
5910
5911static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5912{
5913 int i;
5914
5915 for_each_queue(bp, i) {
5916 struct bnx2x_fastpath *fp = &bp->fp[i];
5917
5918 u16 bd_cons = fp->tx_bd_cons;
5919 u16 sw_prod = fp->tx_pkt_prod;
5920 u16 sw_cons = fp->tx_pkt_cons;
5921
a2fbb9ea
ET
5922 while (sw_cons != sw_prod) {
5923 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5924 sw_cons++;
5925 }
5926 }
5927}
5928
5929static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5930{
5931 int i, j;
5932
5933 for_each_queue(bp, j) {
5934 struct bnx2x_fastpath *fp = &bp->fp[j];
5935
a2fbb9ea
ET
5936 for (i = 0; i < NUM_RX_BD; i++) {
5937 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5938 struct sk_buff *skb = rx_buf->skb;
5939
5940 if (skb == NULL)
5941 continue;
5942
5943 pci_unmap_single(bp->pdev,
5944 pci_unmap_addr(rx_buf, mapping),
5945 bp->rx_buf_use_size,
5946 PCI_DMA_FROMDEVICE);
5947
5948 rx_buf->skb = NULL;
5949 dev_kfree_skb(skb);
5950 }
7a9b2557
VZ
5951 if (!fp->disable_tpa)
5952 bnx2x_free_tpa_pool(bp, fp,
5953 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
5954 }
5955}
5956
5957static void bnx2x_free_skbs(struct bnx2x *bp)
5958{
5959 bnx2x_free_tx_skbs(bp);
5960 bnx2x_free_rx_skbs(bp);
5961}
5962
5963static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5964{
34f80b04 5965 int i, offset = 1;
a2fbb9ea
ET
5966
5967 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5968 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
5969 bp->msix_table[0].vector);
5970
5971 for_each_queue(bp, i) {
c14423fe 5972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5973 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5974 bnx2x_fp(bp, i, state));
5975
228241eb
ET
5976 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5977 BNX2X_ERR("IRQ of fp #%d being freed while "
5978 "state != closed\n", i);
a2fbb9ea 5979
34f80b04 5980 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5981 }
a2fbb9ea
ET
5982}
5983
5984static void bnx2x_free_irq(struct bnx2x *bp)
5985{
a2fbb9ea 5986 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
5987 bnx2x_free_msix_irqs(bp);
5988 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
5989 bp->flags &= ~USING_MSIX_FLAG;
5990
5991 } else
5992 free_irq(bp->pdev->irq, bp->dev);
5993}
5994
5995static int bnx2x_enable_msix(struct bnx2x *bp)
5996{
34f80b04 5997 int i, rc, offset;
a2fbb9ea
ET
5998
5999 bp->msix_table[0].entry = 0;
34f80b04
EG
6000 offset = 1;
6001 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6002
34f80b04
EG
6003 for_each_queue(bp, i) {
6004 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6005
34f80b04
EG
6006 bp->msix_table[i + offset].entry = igu_vec;
6007 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6008 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6009 }
6010
34f80b04
EG
6011 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6012 bp->num_queues + offset);
6013 if (rc) {
6014 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6015 return -1;
6016 }
a2fbb9ea
ET
6017 bp->flags |= USING_MSIX_FLAG;
6018
6019 return 0;
a2fbb9ea
ET
6020}
6021
a2fbb9ea
ET
6022static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6023{
34f80b04 6024 int i, rc, offset = 1;
a2fbb9ea 6025
a2fbb9ea
ET
6026 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6027 bp->dev->name, bp->dev);
a2fbb9ea
ET
6028 if (rc) {
6029 BNX2X_ERR("request sp irq failed\n");
6030 return -EBUSY;
6031 }
6032
6033 for_each_queue(bp, i) {
34f80b04 6034 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6035 bnx2x_msix_fp_int, 0,
6036 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6037 if (rc) {
34f80b04
EG
6038 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6039 i + offset, rc);
a2fbb9ea
ET
6040 bnx2x_free_msix_irqs(bp);
6041 return -EBUSY;
6042 }
6043
6044 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6045 }
6046
6047 return 0;
a2fbb9ea
ET
6048}
6049
6050static int bnx2x_req_irq(struct bnx2x *bp)
6051{
34f80b04 6052 int rc;
a2fbb9ea 6053
34f80b04
EG
6054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6055 bp->dev->name, bp->dev);
a2fbb9ea
ET
6056 if (!rc)
6057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6058
6059 return rc;
a2fbb9ea
ET
6060}
6061
6062/*
6063 * Init service functions
6064 */
6065
34f80b04 6066static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
a2fbb9ea
ET
6067{
6068 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6069 int port = BP_PORT(bp);
a2fbb9ea
ET
6070
6071 /* CAM allocation
6072 * unicasts 0-31:port0 32-63:port1
6073 * multicast 64-127:port0 128-191:port1
6074 */
6075 config->hdr.length_6b = 2;
34f80b04
EG
6076 config->hdr.offset = port ? 31 : 0;
6077 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6078 config->hdr.reserved1 = 0;
6079
6080 /* primary MAC */
6081 config->config_table[0].cam_entry.msb_mac_addr =
6082 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6083 config->config_table[0].cam_entry.middle_mac_addr =
6084 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6085 config->config_table[0].cam_entry.lsb_mac_addr =
6086 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6087 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
6088 config->config_table[0].target_table_entry.flags = 0;
6089 config->config_table[0].target_table_entry.client_id = 0;
6090 config->config_table[0].target_table_entry.vlan_id = 0;
6091
6092 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6093 config->config_table[0].cam_entry.msb_mac_addr,
6094 config->config_table[0].cam_entry.middle_mac_addr,
6095 config->config_table[0].cam_entry.lsb_mac_addr);
6096
6097 /* broadcast */
6098 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6099 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6100 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6101 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
6102 config->config_table[1].target_table_entry.flags =
6103 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6104 config->config_table[1].target_table_entry.client_id = 0;
6105 config->config_table[1].target_table_entry.vlan_id = 0;
6106
6107 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6108 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6109 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6110}
6111
34f80b04
EG
6112static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6113{
6114 struct mac_configuration_cmd_e1h *config =
6115 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6116
6117 if (bp->state != BNX2X_STATE_OPEN) {
6118 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6119 return;
6120 }
6121
6122 /* CAM allocation for E1H
6123 * unicasts: by func number
6124 * multicast: 20+FUNC*20, 20 each
6125 */
6126 config->hdr.length_6b = 1;
6127 config->hdr.offset = BP_FUNC(bp);
6128 config->hdr.client_id = BP_CL_ID(bp);
6129 config->hdr.reserved1 = 0;
6130
6131 /* primary MAC */
6132 config->config_table[0].msb_mac_addr =
6133 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6134 config->config_table[0].middle_mac_addr =
6135 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6136 config->config_table[0].lsb_mac_addr =
6137 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6138 config->config_table[0].client_id = BP_L_ID(bp);
6139 config->config_table[0].vlan_id = 0;
6140 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6141 config->config_table[0].flags = BP_PORT(bp);
6142
6143 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6144 config->config_table[0].msb_mac_addr,
6145 config->config_table[0].middle_mac_addr,
6146 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6147
6148 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6149 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6150 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6151}
6152
a2fbb9ea
ET
6153static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6154 int *state_p, int poll)
6155{
6156 /* can take a while if any port is running */
34f80b04 6157 int cnt = 500;
a2fbb9ea 6158
c14423fe
ET
6159 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6160 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6161
6162 might_sleep();
34f80b04 6163 while (cnt--) {
a2fbb9ea
ET
6164 if (poll) {
6165 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6166 /* if index is different from 0
6167 * the reply for some commands will
a2fbb9ea
ET
6168 * be on a non-default queue
6169 */
6170 if (idx)
6171 bnx2x_rx_int(&bp->fp[idx], 10);
6172 }
34f80b04 6173 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 6174
49d66772 6175 if (*state_p == state)
a2fbb9ea
ET
6176 return 0;
6177
a2fbb9ea 6178 msleep(1);
a2fbb9ea
ET
6179 }
6180
a2fbb9ea 6181 /* timeout! */
49d66772
ET
6182 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6183 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6184#ifdef BNX2X_STOP_ON_ERROR
6185 bnx2x_panic();
6186#endif
a2fbb9ea 6187
49d66772 6188 return -EBUSY;
a2fbb9ea
ET
6189}
6190
6191static int bnx2x_setup_leading(struct bnx2x *bp)
6192{
34f80b04 6193 int rc;
a2fbb9ea 6194
c14423fe 6195 /* reset IGU state */
34f80b04 6196 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6197
6198 /* SETUP ramrod */
6199 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6200
34f80b04
EG
6201 /* Wait for completion */
6202 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6203
34f80b04 6204 return rc;
a2fbb9ea
ET
6205}
6206
6207static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6208{
a2fbb9ea 6209 /* reset IGU state */
34f80b04 6210 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6211
228241eb 6212 /* SETUP ramrod */
a2fbb9ea
ET
6213 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6214 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6215
6216 /* Wait for completion */
6217 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6218 &(bp->fp[index].state), 0);
a2fbb9ea
ET
6219}
6220
a2fbb9ea
ET
6221static int bnx2x_poll(struct napi_struct *napi, int budget);
6222static void bnx2x_set_rx_mode(struct net_device *dev);
6223
34f80b04
EG
6224/* must be called with rtnl_lock */
6225static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6226{
228241eb 6227 u32 load_code;
34f80b04
EG
6228 int i, rc;
6229
6230#ifdef BNX2X_STOP_ON_ERROR
6231 if (unlikely(bp->panic))
6232 return -EPERM;
6233#endif
a2fbb9ea
ET
6234
6235 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6236
34f80b04
EG
6237 /* Send LOAD_REQUEST command to MCP
6238 The reply indicates which type of LOAD to perform:
6239 if this is the first port to be initialized,
6240 the common blocks must be initialized as well; otherwise they are skipped
a2fbb9ea 6241 */
34f80b04 6242 if (!BP_NOMCP(bp)) {
228241eb
ET
6243 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6244 if (!load_code) {
da5a662a 6245 BNX2X_ERR("MCP response failure, aborting\n");
228241eb
ET
6246 return -EBUSY;
6247 }
34f80b04 6248 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6249 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6250
a2fbb9ea 6251 } else {
da5a662a
VZ
6252 int port = BP_PORT(bp);
6253
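 /* no MCP to arbitrate: mimic it with driver-global load counters;
 the first load overall does COMMON init, the first load on this
 port does PORT init */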
34f80b04
EG
6254 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6255 load_count[0], load_count[1], load_count[2]);
6256 load_count[0]++;
da5a662a 6257 load_count[1 + port]++;
34f80b04
EG
6258 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6259 load_count[0], load_count[1], load_count[2]);
6260 if (load_count[0] == 1)
6261 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6262 else if (load_count[1 + port] == 1)
34f80b04
EG
6263 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6264 else
6265 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
6266 }
6267
34f80b04
EG
6268 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6269 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6270 bp->port.pmf = 1;
6271 else
6272 bp->port.pmf = 0;
6273 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6274
6275 /* if we can't use MSI-X we only need one fp,
6276 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
6277 * and fall back to INT#A with one fp
6278 */
34f80b04
EG
6279 if (use_inta) {
6280 bp->num_queues = 1;
6281
6282 } else {
6283 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6284 /* user requested number */
6285 bp->num_queues = use_multi;
6286
6287 else if (use_multi)
6288 bp->num_queues = min_t(u32, num_online_cpus(),
6289 BP_MAX_QUEUES(bp));
6290 else
a2fbb9ea 6291 bp->num_queues = 1;
34f80b04
EG
6292
6293 if (bnx2x_enable_msix(bp)) {
6294 /* failed to enable MSI-X */
6295 bp->num_queues = 1;
6296 if (use_multi)
6297 BNX2X_ERR("Multi requested but failed"
6298 " to enable MSI-X\n");
a2fbb9ea
ET
6299 }
6300 }
34f80b04
EG
6301 DP(NETIF_MSG_IFUP,
6302 "set number of queues to %d\n", bp->num_queues);
c14423fe 6303
a2fbb9ea
ET
6304 if (bnx2x_alloc_mem(bp))
6305 return -ENOMEM;
6306
7a9b2557
VZ
6307 for_each_queue(bp, i)
6308 bnx2x_fp(bp, i, disable_tpa) =
6309 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6310
34f80b04
EG
6311 if (bp->flags & USING_MSIX_FLAG) {
6312 rc = bnx2x_req_msix_irqs(bp);
6313 if (rc) {
6314 pci_disable_msix(bp->pdev);
6315 goto load_error;
6316 }
6317 } else {
6318 bnx2x_ack_int(bp);
6319 rc = bnx2x_req_irq(bp);
6320 if (rc) {
6321 BNX2X_ERR("IRQ request failed, aborting\n");
6322 goto load_error;
a2fbb9ea
ET
6323 }
6324 }
6325
6326 for_each_queue(bp, i)
6327 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6328 bnx2x_poll, 128);
6329
a2fbb9ea 6330 /* Initialize HW */
34f80b04
EG
6331 rc = bnx2x_init_hw(bp, load_code);
6332 if (rc) {
a2fbb9ea 6333 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6334 goto load_error;
a2fbb9ea
ET
6335 }
6336
a2fbb9ea 6337 /* Setup NIC internals and enable interrupts */
471de716 6338 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6339
6340 /* Send LOAD_DONE command to MCP */
34f80b04 6341 if (!BP_NOMCP(bp)) {
228241eb
ET
6342 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6343 if (!load_code) {
da5a662a 6344 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6345 rc = -EBUSY;
228241eb 6346 goto load_int_disable;
a2fbb9ea
ET
6347 }
6348 }
6349
bb2a0f7a
YG
6350 bnx2x_stats_init(bp);
6351
a2fbb9ea
ET
6352 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6353
6354 /* Enable Rx interrupt handling before sending the ramrod
6355 as its completion arrives on an Rx FP queue */
6356 for_each_queue(bp, i)
6357 napi_enable(&bnx2x_fp(bp, i, napi));
6358
da5a662a
VZ
6359 /* Enable interrupt handling */
6360 atomic_set(&bp->intr_sem, 0);
6361
34f80b04
EG
6362 rc = bnx2x_setup_leading(bp);
6363 if (rc) {
da5a662a 6364 BNX2X_ERR("Setup leading failed!\n");
228241eb 6365 goto load_stop_netif;
34f80b04 6366 }
a2fbb9ea 6367
34f80b04
EG
6368 if (CHIP_IS_E1H(bp))
6369 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6370 BNX2X_ERR("!!! mf_cfg function disabled\n");
6371 bp->state = BNX2X_STATE_DISABLED;
6372 }
a2fbb9ea 6373
34f80b04
EG
6374 if (bp->state == BNX2X_STATE_OPEN)
6375 for_each_nondefault_queue(bp, i) {
6376 rc = bnx2x_setup_multi(bp, i);
6377 if (rc)
6378 goto load_stop_netif;
6379 }
a2fbb9ea 6380
34f80b04
EG
6381 if (CHIP_IS_E1(bp))
6382 bnx2x_set_mac_addr_e1(bp);
6383 else
6384 bnx2x_set_mac_addr_e1h(bp);
6385
6386 if (bp->port.pmf)
6387 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6388
6389 /* Start fast path */
34f80b04
EG
6390 switch (load_mode) {
6391 case LOAD_NORMAL:
6392 /* Tx queue only needs to be re-enabled */
6393 netif_wake_queue(bp->dev);
6394 bnx2x_set_rx_mode(bp->dev);
6395 break;
6396
6397 case LOAD_OPEN:
a2fbb9ea 6398 netif_start_queue(bp->dev);
34f80b04 6399 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
6400 if (bp->flags & USING_MSIX_FLAG)
6401 printk(KERN_INFO PFX "%s: using MSI-X\n",
6402 bp->dev->name);
34f80b04 6403 break;
a2fbb9ea 6404
34f80b04 6405 case LOAD_DIAG:
a2fbb9ea 6406 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6407 bp->state = BNX2X_STATE_DIAG;
6408 break;
6409
6410 default:
6411 break;
a2fbb9ea
ET
6412 }
6413
34f80b04
EG
6414 if (!bp->port.pmf)
6415 bnx2x__link_status_update(bp);
6416
a2fbb9ea
ET
6417 /* start the timer */
6418 mod_timer(&bp->timer, jiffies + bp->current_interval);
6419
34f80b04 6420
a2fbb9ea
ET
6421 return 0;
6422
228241eb 6423load_stop_netif:
a2fbb9ea
ET
6424 for_each_queue(bp, i)
6425 napi_disable(&bnx2x_fp(bp, i, napi));
6426
228241eb 6427load_int_disable:
615f8fd9 6428 bnx2x_int_disable_sync(bp);
a2fbb9ea 6429
34f80b04 6430 /* Release IRQs */
a2fbb9ea
ET
6431 bnx2x_free_irq(bp);
6432
7a9b2557
VZ
6433 /* Free SKBs, SGEs, TPA pool and driver internals */
6434 bnx2x_free_skbs(bp);
6435 for_each_queue(bp, i)
6436 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6437 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6438load_error:
a2fbb9ea
ET
6439 bnx2x_free_mem(bp);
6440
6441 /* TBD we really need to reset the chip
6442 if we want to recover from this */
34f80b04 6443 return rc;
a2fbb9ea
ET
6444}
6445
6446static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6447{
a2fbb9ea
ET
6448 int rc;
6449
c14423fe 6450 /* halt the connection */
a2fbb9ea
ET
6451 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6452 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6453
34f80b04 6454 /* Wait for completion */
a2fbb9ea 6455 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6456 &(bp->fp[index].state), 1);
c14423fe 6457 if (rc) /* timeout */
a2fbb9ea
ET
6458 return rc;
6459
6460 /* delete cfc entry */
6461 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6462
34f80b04
EG
6463 /* Wait for completion */
6464 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6465 &(bp->fp[index].state), 1);
6466 return rc;
a2fbb9ea
ET
6467}
6468
da5a662a 6469static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6470{
49d66772 6471 u16 dsb_sp_prod_idx;
c14423fe 6472 /* if the other port is handling traffic,
a2fbb9ea 6473 this can take a lot of time */
34f80b04
EG
6474 int cnt = 500;
6475 int rc;
a2fbb9ea
ET
6476
6477 might_sleep();
6478
6479 /* Send HALT ramrod */
6480 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6481 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6482
34f80b04
EG
6483 /* Wait for completion */
6484 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6485 &(bp->fp[0].state), 1);
6486 if (rc) /* timeout */
da5a662a 6487 return rc;
a2fbb9ea 6488
49d66772 6489 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6490
228241eb 6491 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6492 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6493
49d66772 6494 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6495 we are going to reset the chip anyway
6496 so there is not much to do if this times out
6497 */
34f80b04 6498 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 6499 msleep(1);
34f80b04
EG
6500 if (!cnt) {
6501 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6502 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6503 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6504#ifdef BNX2X_STOP_ON_ERROR
6505 bnx2x_panic();
da5a662a
VZ
6506#else
6507 rc = -EBUSY;
34f80b04
EG
6508#endif
6509 break;
6510 }
6511 cnt--;
da5a662a 6512 msleep(1);
49d66772
ET
6513 }
6514 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6515 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6516
6517 return rc;
a2fbb9ea
ET
6518}
6519
34f80b04
EG
6520static void bnx2x_reset_func(struct bnx2x *bp)
6521{
6522 int port = BP_PORT(bp);
6523 int func = BP_FUNC(bp);
6524 int base, i;
6525
6526 /* Configure IGU */
6527 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6528 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6529
6530 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6531
6532 /* Clear ILT */
6533 base = FUNC_ILT_BASE(func);
6534 for (i = base; i < base + ILT_PER_FUNC; i++)
6535 bnx2x_ilt_wr(bp, i, 0);
6536}
6537
6538static void bnx2x_reset_port(struct bnx2x *bp)
6539{
6540 int port = BP_PORT(bp);
6541 u32 val;
6542
6543 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6544
6545 /* Do not rcv packets to BRB */
6546 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6547 /* Do not direct rcv packets that are not for MCP to the BRB */
6548 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6549 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6550
6551 /* Configure AEU */
6552 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6553
6554 msleep(100);
6555 /* Check for BRB port occupancy */
6556 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6557 if (val)
6558 DP(NETIF_MSG_IFDOWN,
6559 "BRB1 is not empty %d blocks are occupied\n", val);
6560
6561 /* TODO: Close Doorbell port? */
6562}
6563
6564static void bnx2x_reset_common(struct bnx2x *bp)
6565{
6566 /* reset_common */
6567 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6568 0xd3ffff7f);
6569 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6570}
6571
6572static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6573{
6574 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6575 BP_FUNC(bp), reset_code);
6576
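 /* teardown nests like bring-up: a COMMON unload also resets the
 port and the function, a PORT unload also resets the function */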
6577 switch (reset_code) {
6578 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6579 bnx2x_reset_port(bp);
6580 bnx2x_reset_func(bp);
6581 bnx2x_reset_common(bp);
6582 break;
6583
6584 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6585 bnx2x_reset_port(bp);
6586 bnx2x_reset_func(bp);
6587 break;
6588
6589 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6590 bnx2x_reset_func(bp);
6591 break;
49d66772 6592
34f80b04
EG
6593 default:
6594 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6595 break;
6596 }
6597}
6598
6599/* must be called with rtnl_lock */
6600static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6601{
da5a662a 6602 int port = BP_PORT(bp);
a2fbb9ea 6603 u32 reset_code = 0;
da5a662a 6604 int i, cnt, rc;
a2fbb9ea
ET
6605
6606 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6607
228241eb
ET
6608 bp->rx_mode = BNX2X_RX_MODE_NONE;
6609 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6610
228241eb
ET
6611 if (netif_running(bp->dev)) {
6612 netif_tx_disable(bp->dev);
6613 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6614 }
6615
34f80b04
EG
6616 del_timer_sync(&bp->timer);
6617 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6618 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6619 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6620
da5a662a 6621 /* Wait until tx fast path tasks complete */
228241eb
ET
6622 for_each_queue(bp, i) {
6623 struct bnx2x_fastpath *fp = &bp->fp[i];
6624
34f80b04
EG
6625 cnt = 1000;
6626 smp_rmb();
da5a662a
VZ
6627 while (BNX2X_HAS_TX_WORK(fp)) {
6628
6629 if (!netif_running(bp->dev))
6630 bnx2x_tx_int(fp, 1000);
6631
34f80b04
EG
6632 if (!cnt) {
6633 BNX2X_ERR("timeout waiting for queue[%d]\n",
6634 i);
6635#ifdef BNX2X_STOP_ON_ERROR
6636 bnx2x_panic();
6637 return -EBUSY;
6638#else
6639 break;
6640#endif
6641 }
6642 cnt--;
da5a662a 6643 msleep(1);
34f80b04
EG
6644 smp_rmb();
6645 }
228241eb 6646 }
a2fbb9ea 6647
da5a662a
VZ
6648 /* Give HW time to discard old tx messages */
6649 msleep(1);
a2fbb9ea 6650
228241eb
ET
6651 for_each_queue(bp, i)
6652 napi_disable(&bnx2x_fp(bp, i, napi));
6653 /* Disable interrupts after Tx and Rx are disabled on stack level */
6654 bnx2x_int_disable_sync(bp);
a2fbb9ea 6655
34f80b04
EG
6656 /* Release IRQs */
6657 bnx2x_free_irq(bp);
6658
da5a662a
VZ
6659 if (unload_mode == UNLOAD_NORMAL)
6660 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6661
6662 else if (bp->flags & NO_WOL_FLAG) {
a2fbb9ea 6663 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
da5a662a
VZ
6664 if (CHIP_IS_E1H(bp))
6665 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
228241eb 6666
da5a662a
VZ
6667 } else if (bp->wol) {
6668 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6669 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6670 u32 val;
34f80b04
EG
6671 /* The mac address is written to entries 1-4 to
6672 preserve entry 0 which is used by the PMF */
da5a662a
VZ
6673 u8 entry = (BP_E1HVN(bp) + 1)*8;
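 /* each EMAC CAM entry is 8 bytes wide (two 32-bit MATCH
 registers), hence the *8 stride */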
6674
a2fbb9ea 6675 val = (mac_addr[0] << 8) | mac_addr[1];
da5a662a 6676 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
a2fbb9ea
ET
6677
6678 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6679 (mac_addr[4] << 8) | mac_addr[5];
da5a662a 6680 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
a2fbb9ea
ET
6681
6682 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6683
a2fbb9ea
ET
6684 } else
6685 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6686
da5a662a
VZ
6687 if (CHIP_IS_E1H(bp))
6688 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6689
34f80b04
EG
6690 /* Close multi and leading connections
6691 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6692 for_each_nondefault_queue(bp, i)
6693 if (bnx2x_stop_multi(bp, i))
228241eb 6694 goto unload_error;
a2fbb9ea 6695
da5a662a
VZ
6696 rc = bnx2x_stop_leading(bp);
6697 if (rc) {
34f80b04 6698 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6699#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6700 return -EBUSY;
da5a662a
VZ
6701#else
6702 goto unload_error;
34f80b04 6703#endif
228241eb
ET
6704 }
6705
6706unload_error:
34f80b04 6707 if (!BP_NOMCP(bp))
228241eb 6708 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6709 else {
6710 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6711 load_count[0], load_count[1], load_count[2]);
6712 load_count[0]--;
da5a662a 6713 load_count[1 + port]--;
34f80b04
EG
6714 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6715 load_count[0], load_count[1], load_count[2]);
6716 if (load_count[0] == 0)
6717 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6718 else if (load_count[1 + port] == 0)
34f80b04
EG
6719 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6720 else
6721 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6722 }
a2fbb9ea 6723
34f80b04
EG
6724 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6725 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6726 bnx2x__link_reset(bp);
a2fbb9ea
ET
6727
6728 /* Reset the chip */
228241eb 6729 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6730
6731 /* Report UNLOAD_DONE to MCP */
34f80b04 6732 if (!BP_NOMCP(bp))
a2fbb9ea
ET
6733 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6734
7a9b2557 6735 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6736 bnx2x_free_skbs(bp);
7a9b2557
VZ
6737 for_each_queue(bp, i)
6738 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6739 RX_SGE_CNT*NUM_RX_SGE_PAGES);
a2fbb9ea
ET
6740 bnx2x_free_mem(bp);
6741
6742 bp->state = BNX2X_STATE_CLOSED;
228241eb 6743
a2fbb9ea
ET
6744 netif_carrier_off(bp->dev);
6745
6746 return 0;
6747}
6748
34f80b04
EG
6749static void bnx2x_reset_task(struct work_struct *work)
6750{
6751 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6752
6753#ifdef BNX2X_STOP_ON_ERROR
6754 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6755 " so reset not done to allow debug dump,\n"
6756 KERN_ERR " you will need to reboot when done\n");
6757 return;
6758#endif
6759
6760 rtnl_lock();
6761
6762 if (!netif_running(bp->dev))
6763 goto reset_task_exit;
6764
6765 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6766 bnx2x_nic_load(bp, LOAD_NORMAL);
6767
6768reset_task_exit:
6769 rtnl_unlock();
6770}
6771
a2fbb9ea
ET
6772/* end of nic load/unload */
6773
6774/* ethtool_ops */
6775
6776/*
6777 * Init service functions
6778 */
6779
34f80b04
EG
6780static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6781{
6782 u32 val;
6783
6784 /* Check if there is any driver already loaded */
6785 val = REG_RD(bp, MISC_REG_UNPREPARED);
6786 if (val == 0x1) {
6787 /* Check if it is the UNDI driver
6788 * UNDI driver initializes CID offset for normal bell to 0x7
6789 */
4a37fb66 6790 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6791 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6792 if (val == 0x7) {
6793 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6794 /* save our func */
34f80b04 6795 int func = BP_FUNC(bp);
da5a662a
VZ
6796 u32 swap_en;
6797 u32 swap_val;
34f80b04
EG
6798
6799 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6800
6801 /* try unload UNDI on port 0 */
6802 bp->func = 0;
da5a662a
VZ
6803 bp->fw_seq =
6804 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6805 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6806 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6807
6808 /* if UNDI is loaded on the other port */
6809 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6810
da5a662a
VZ
6811 /* send "DONE" for previous unload */
6812 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6813
6814 /* unload UNDI on port 1 */
34f80b04 6815 bp->func = 1;
da5a662a
VZ
6816 bp->fw_seq =
6817 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6818 DRV_MSG_SEQ_NUMBER_MASK);
6819 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6820
6821 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6822 }
6823
da5a662a
VZ
6824 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6825 HC_REG_CONFIG_0), 0x1000);
6826
6827 /* close input traffic and wait for it */
6828 /* Do not rcv packets to BRB */
6829 REG_WR(bp,
6830 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6831 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6832 /* Do not direct rcv packets that are not for MCP to
6833 * the BRB */
6834 REG_WR(bp,
6835 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6836 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6837 /* clear AEU */
6838 REG_WR(bp,
6839 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6840 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6841 msleep(10);
6842
6843 /* save NIG port swap info */
6844 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6845 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6846 /* reset device */
6847 REG_WR(bp,
6848 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6849 0xd3ffffff);
34f80b04
EG
6850 REG_WR(bp,
6851 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6852 0x1403);
da5a662a
VZ
6853 /* take the NIG out of reset and restore swap values */
6854 REG_WR(bp,
6855 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6856 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6857 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6858 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6859
6860 /* send unload done to the MCP */
6861 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6862
6863 /* restore our func and fw_seq */
6864 bp->func = func;
6865 bp->fw_seq =
6866 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6867 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6868 }
4a37fb66 6869 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
6870 }
6871}
6872
6873static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6874{
6875 u32 val, val2, val3, val4, id;
6876
6877 /* Get the chip revision id and number. */
6878 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6879 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6880 id = ((val & 0xffff) << 16);
6881 val = REG_RD(bp, MISC_REG_CHIP_REV);
6882 id |= ((val & 0xf) << 12);
6883 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6884 id |= ((val & 0xff) << 4);
6885 val = REG_RD(bp, MISC_REG_BOND_ID);
6886 id |= (val & 0xf);
6887 bp->common.chip_id = id;
6888 bp->link_params.chip_id = bp->common.chip_id;
6889 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6890
6891 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6892 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6893 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6894 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6895 bp->common.flash_size, bp->common.flash_size);
6896
6897 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6898 bp->link_params.shmem_base = bp->common.shmem_base;
6899 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6900
6901 if (!bp->common.shmem_base ||
6902 (bp->common.shmem_base < 0xA0000) ||
6903 (bp->common.shmem_base >= 0xC0000)) {
6904 BNX2X_DEV_INFO("MCP not active\n");
6905 bp->flags |= NO_MCP_FLAG;
6906 return;
6907 }
6908
6909 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6910 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6911 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6912 BNX2X_ERR("BAD MCP validity signature\n");
6913
6914 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6915 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6916
6917 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6918 bp->common.hw_config, bp->common.board);
6919
6920 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6921 SHARED_HW_CFG_LED_MODE_MASK) >>
6922 SHARED_HW_CFG_LED_MODE_SHIFT);
6923
6924 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6925 bp->common.bc_ver = val;
6926 BNX2X_DEV_INFO("bc_ver %X\n", val);
6927 if (val < BNX2X_BC_VER) {
6928 /* for now only warn;
6929 * later we might need to enforce this */
6930 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6931 " please upgrade BC\n", BNX2X_BC_VER, val);
6932 }
6933 BNX2X_DEV_INFO("%sWoL Capable\n",
6934 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6935
6936 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6937 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6938 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6939 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6940
6941 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6942 val, val2, val3, val4);
6943}
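
The chip_id assembled above packs four register reads into a single word (chip num in bits 16-31, rev in 12-15, metal in 4-11, bond_id in 0-3). A minimal standalone sketch of that packing and unpacking follows; it is not part of the driver, and 0x164e is used only as an example chip number:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_chip_id(uint32_t num, uint32_t rev,
			     uint32_t metal, uint32_t bond)
{
	/* same shifts/masks as bnx2x_get_common_hwinfo() above */
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
	uint32_t id = pack_chip_id(0x164e, 0x0, 0x00, 0x0);	/* example values */

	printf("chip ID is 0x%x\n", id);
	printf("num 0x%x rev 0x%x metal 0x%x bond_id 0x%x\n",
	       id >> 16, (id >> 12) & 0xf, (id >> 4) & 0xff, id & 0xf);
	return 0;
}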
6944
6945static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6946 u32 switch_cfg)
a2fbb9ea 6947{
34f80b04 6948 int port = BP_PORT(bp);
a2fbb9ea
ET
6949 u32 ext_phy_type;
6950
a2fbb9ea
ET
6951 switch (switch_cfg) {
6952 case SWITCH_CFG_1G:
6953 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6954
c18487ee
YR
6955 ext_phy_type =
6956 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6957 switch (ext_phy_type) {
6958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6959 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6960 ext_phy_type);
6961
34f80b04
EG
6962 bp->port.supported |= (SUPPORTED_10baseT_Half |
6963 SUPPORTED_10baseT_Full |
6964 SUPPORTED_100baseT_Half |
6965 SUPPORTED_100baseT_Full |
6966 SUPPORTED_1000baseT_Full |
6967 SUPPORTED_2500baseX_Full |
6968 SUPPORTED_TP |
6969 SUPPORTED_FIBRE |
6970 SUPPORTED_Autoneg |
6971 SUPPORTED_Pause |
6972 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6973 break;
6974
6975 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6976 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6977 ext_phy_type);
6978
34f80b04
EG
6979 bp->port.supported |= (SUPPORTED_10baseT_Half |
6980 SUPPORTED_10baseT_Full |
6981 SUPPORTED_100baseT_Half |
6982 SUPPORTED_100baseT_Full |
6983 SUPPORTED_1000baseT_Full |
6984 SUPPORTED_TP |
6985 SUPPORTED_FIBRE |
6986 SUPPORTED_Autoneg |
6987 SUPPORTED_Pause |
6988 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6989 break;
6990
6991 default:
6992 BNX2X_ERR("NVRAM config error. "
6993 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6994 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6995 return;
6996 }
6997
34f80b04
EG
6998 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6999 port*0x10);
7000 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7001 break;
7002
7003 case SWITCH_CFG_10G:
7004 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7005
c18487ee
YR
7006 ext_phy_type =
7007 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7008 switch (ext_phy_type) {
7009 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7010 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7011 ext_phy_type);
7012
34f80b04
EG
7013 bp->port.supported |= (SUPPORTED_10baseT_Half |
7014 SUPPORTED_10baseT_Full |
7015 SUPPORTED_100baseT_Half |
7016 SUPPORTED_100baseT_Full |
7017 SUPPORTED_1000baseT_Full |
7018 SUPPORTED_2500baseX_Full |
7019 SUPPORTED_10000baseT_Full |
7020 SUPPORTED_TP |
7021 SUPPORTED_FIBRE |
7022 SUPPORTED_Autoneg |
7023 SUPPORTED_Pause |
7024 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7025 break;
7026
7027 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7028 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7029 ext_phy_type);
f1410647 7030
34f80b04
EG
7031 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7032 SUPPORTED_FIBRE |
7033 SUPPORTED_Pause |
7034 SUPPORTED_Asym_Pause);
f1410647
ET
7035 break;
7036
a2fbb9ea 7037 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7038 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7039 ext_phy_type);
7040
34f80b04
EG
7041 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7042 SUPPORTED_1000baseT_Full |
7043 SUPPORTED_FIBRE |
7044 SUPPORTED_Pause |
7045 SUPPORTED_Asym_Pause);
f1410647
ET
7046 break;
7047
7048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7049 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7050 ext_phy_type);
7051
34f80b04
EG
7052 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7053 SUPPORTED_1000baseT_Full |
7054 SUPPORTED_FIBRE |
7055 SUPPORTED_Autoneg |
7056 SUPPORTED_Pause |
7057 SUPPORTED_Asym_Pause);
f1410647
ET
7058 break;
7059
c18487ee
YR
7060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7061 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7062 ext_phy_type);
7063
34f80b04
EG
7064 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7065 SUPPORTED_2500baseX_Full |
7066 SUPPORTED_1000baseT_Full |
7067 SUPPORTED_FIBRE |
7068 SUPPORTED_Autoneg |
7069 SUPPORTED_Pause |
7070 SUPPORTED_Asym_Pause);
c18487ee
YR
7071 break;
7072
f1410647
ET
7073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7074 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7075 ext_phy_type);
7076
34f80b04
EG
7077 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7078 SUPPORTED_TP |
7079 SUPPORTED_Autoneg |
7080 SUPPORTED_Pause |
7081 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7082 break;
7083
c18487ee
YR
7084 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7085 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7086 bp->link_params.ext_phy_config);
7087 break;
7088
a2fbb9ea
ET
7089 default:
7090 BNX2X_ERR("NVRAM config error. "
7091 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7092 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7093 return;
7094 }
7095
34f80b04
EG
7096 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7097 port*0x18);
7098 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7099
a2fbb9ea
ET
7100 break;
7101
7102 default:
7103 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7104 bp->port.link_config);
a2fbb9ea
ET
7105 return;
7106 }
34f80b04 7107 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7108
7109 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7110 if (!(bp->link_params.speed_cap_mask &
7111 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7112 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7113
c18487ee
YR
7114 if (!(bp->link_params.speed_cap_mask &
7115 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7116 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7117
c18487ee
YR
7118 if (!(bp->link_params.speed_cap_mask &
7119 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7120 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7121
c18487ee
YR
7122 if (!(bp->link_params.speed_cap_mask &
7123 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7124 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7125
c18487ee
YR
7126 if (!(bp->link_params.speed_cap_mask &
7127 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7128 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7129 SUPPORTED_1000baseT_Full);
a2fbb9ea 7130
c18487ee
YR
7131 if (!(bp->link_params.speed_cap_mask &
7132 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7133 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7134
c18487ee
YR
7135 if (!(bp->link_params.speed_cap_mask &
7136 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7137 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7138
34f80b04 7139 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7140}
7141
34f80b04 7142static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7143{
c18487ee 7144 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7145
34f80b04 7146 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7147 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7148 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7149 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7150 bp->port.advertising = bp->port.supported;
a2fbb9ea 7151 } else {
c18487ee
YR
7152 u32 ext_phy_type =
7153 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7154
7155 if ((ext_phy_type ==
7156 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7157 (ext_phy_type ==
7158 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7159 /* force 10G, no AN */
c18487ee 7160 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7161 bp->port.advertising =
a2fbb9ea
ET
7162 (ADVERTISED_10000baseT_Full |
7163 ADVERTISED_FIBRE);
7164 break;
7165 }
7166 BNX2X_ERR("NVRAM config error. "
7167 "Invalid link_config 0x%x"
7168 " Autoneg not supported\n",
34f80b04 7169 bp->port.link_config);
a2fbb9ea
ET
7170 return;
7171 }
7172 break;
7173
7174 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7175 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7176 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7177 bp->port.advertising = (ADVERTISED_10baseT_Full |
7178 ADVERTISED_TP);
a2fbb9ea
ET
7179 } else {
7180 BNX2X_ERR("NVRAM config error. "
7181 "Invalid link_config 0x%x"
7182 " speed_cap_mask 0x%x\n",
34f80b04 7183 bp->port.link_config,
c18487ee 7184 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7185 return;
7186 }
7187 break;
7188
7189 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7190 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7191 bp->link_params.req_line_speed = SPEED_10;
7192 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7193 bp->port.advertising = (ADVERTISED_10baseT_Half |
7194 ADVERTISED_TP);
a2fbb9ea
ET
7195 } else {
7196 BNX2X_ERR("NVRAM config error. "
7197 "Invalid link_config 0x%x"
7198 " speed_cap_mask 0x%x\n",
34f80b04 7199 bp->port.link_config,
c18487ee 7200 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7201 return;
7202 }
7203 break;
7204
7205 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7206 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7207 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7208 bp->port.advertising = (ADVERTISED_100baseT_Full |
7209 ADVERTISED_TP);
a2fbb9ea
ET
7210 } else {
7211 BNX2X_ERR("NVRAM config error. "
7212 "Invalid link_config 0x%x"
7213 " speed_cap_mask 0x%x\n",
34f80b04 7214 bp->port.link_config,
c18487ee 7215 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7216 return;
7217 }
7218 break;
7219
7220 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7221 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7222 bp->link_params.req_line_speed = SPEED_100;
7223 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7224 bp->port.advertising = (ADVERTISED_100baseT_Half |
7225 ADVERTISED_TP);
a2fbb9ea
ET
7226 } else {
7227 BNX2X_ERR("NVRAM config error. "
7228 "Invalid link_config 0x%x"
7229 " speed_cap_mask 0x%x\n",
34f80b04 7230 bp->port.link_config,
c18487ee 7231 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7232 return;
7233 }
7234 break;
7235
7236 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7237 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7238 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7239 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7240 ADVERTISED_TP);
a2fbb9ea
ET
7241 } else {
7242 BNX2X_ERR("NVRAM config error. "
7243 "Invalid link_config 0x%x"
7244 " speed_cap_mask 0x%x\n",
34f80b04 7245 bp->port.link_config,
c18487ee 7246 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7247 return;
7248 }
7249 break;
7250
7251 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7252 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7253 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7254 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7255 ADVERTISED_TP);
a2fbb9ea
ET
7256 } else {
7257 BNX2X_ERR("NVRAM config error. "
7258 "Invalid link_config 0x%x"
7259 " speed_cap_mask 0x%x\n",
34f80b04 7260 bp->port.link_config,
c18487ee 7261 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7262 return;
7263 }
7264 break;
7265
7266 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7267 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7268 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7269 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7270 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7271 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7272 ADVERTISED_FIBRE);
a2fbb9ea
ET
7273 } else {
7274 BNX2X_ERR("NVRAM config error. "
7275 "Invalid link_config 0x%x"
7276 " speed_cap_mask 0x%x\n",
34f80b04 7277 bp->port.link_config,
c18487ee 7278 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7279 return;
7280 }
7281 break;
7282
7283 default:
7284 BNX2X_ERR("NVRAM config error. "
7285 "BAD link speed link_config 0x%x\n",
34f80b04 7286 bp->port.link_config);
c18487ee 7287 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7288 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7289 break;
7290 }
a2fbb9ea 7291
34f80b04
EG
7292 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7293 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7294 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7295 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7296 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7297
c18487ee 7298 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7299 " advertising 0x%x\n",
c18487ee
YR
7300 bp->link_params.req_line_speed,
7301 bp->link_params.req_duplex,
34f80b04 7302 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7303}
7304
34f80b04 7305static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7306{
34f80b04
EG
7307 int port = BP_PORT(bp);
7308 u32 val, val2;
a2fbb9ea 7309
c18487ee 7310 bp->link_params.bp = bp;
34f80b04 7311 bp->link_params.port = port;
c18487ee 7312
c18487ee 7313 bp->link_params.serdes_config =
f1410647 7314 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7315 bp->link_params.lane_config =
a2fbb9ea 7316 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7317 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7318 SHMEM_RD(bp,
7319 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7320 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7321 SHMEM_RD(bp,
7322 dev_info.port_hw_config[port].speed_capability_mask);
7323
34f80b04 7324 bp->port.link_config =
a2fbb9ea
ET
7325 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7326
34f80b04
EG
7327 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7328 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7329 " link_config 0x%08x\n",
c18487ee
YR
7330 bp->link_params.serdes_config,
7331 bp->link_params.lane_config,
7332 bp->link_params.ext_phy_config,
34f80b04 7333 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7334
34f80b04 7335 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7336 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7337 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7338
7339 bnx2x_link_settings_requested(bp);
7340
7341 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7342 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7343 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7344 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7345 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7346 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7347 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7348 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7349 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7350 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7351}
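
The byte shuffling above reflects how the station address is stored in shared memory: mac_upper carries bytes 0-1 and mac_lower bytes 2-5, most significant byte first. A small standalone sketch of the same unpacking (the example words are made up; the driver reads the real ones with SHMEM_RD()):

#include <stdint.h>
#include <stdio.h>

static void unpack_mac(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	/* same byte order as bnx2x_get_port_hwinfo() above */
	mac[0] = (uint8_t)(upper >> 8);
	mac[1] = (uint8_t)upper;
	mac[2] = (uint8_t)(lower >> 24);
	mac[3] = (uint8_t)(lower >> 16);
	mac[4] = (uint8_t)(lower >> 8);
	mac[5] = (uint8_t)lower;
}

int main(void)
{
	uint8_t mac[6];

	unpack_mac(0x0010, 0x18001020, mac);	/* example values only */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}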
7352
7353static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7354{
7355 int func = BP_FUNC(bp);
7356 u32 val, val2;
7357 int rc = 0;
a2fbb9ea 7358
34f80b04 7359 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7360
34f80b04
EG
7361 bp->e1hov = 0;
7362 bp->e1hmf = 0;
7363 if (CHIP_IS_E1H(bp)) {
7364 bp->mf_config =
7365 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7366
34f80b04
EG
7367 val =
7368 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7369 FUNC_MF_CFG_E1HOV_TAG_MASK);
7370 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7371
34f80b04
EG
7372 bp->e1hov = val;
7373 bp->e1hmf = 1;
7374 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7375 "(0x%04x)\n",
7376 func, bp->e1hov, bp->e1hov);
7377 } else {
7378 BNX2X_DEV_INFO("Single function mode\n");
7379 if (BP_E1HVN(bp)) {
7380 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7381 " aborting\n", func);
7382 rc = -EPERM;
7383 }
7384 }
7385 }
a2fbb9ea 7386
34f80b04
EG
7387 if (!BP_NOMCP(bp)) {
7388 bnx2x_get_port_hwinfo(bp);
7389
7390 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7391 DRV_MSG_SEQ_NUMBER_MASK);
7392 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7393 }
7394
7395 if (IS_E1HMF(bp)) {
7396 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7397 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7398 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7399 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7400 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7401 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7402 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7403 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7404 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7405 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7406 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7407 ETH_ALEN);
7408 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7409 ETH_ALEN);
a2fbb9ea 7410 }
34f80b04
EG
7411
7412 return rc;
a2fbb9ea
ET
7413 }
7414
34f80b04
EG
7415 if (BP_NOMCP(bp)) {
7416 /* only supposed to happen on emulation/FPGA */
7417 BNX2X_ERR("warning rendom MAC workaround active\n");
7418 random_ether_addr(bp->dev->dev_addr);
7419 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7420 }
a2fbb9ea 7421
34f80b04
EG
7422 return rc;
7423}
7424
7425static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7426{
7427 int func = BP_FUNC(bp);
7428 int rc;
7429
da5a662a
VZ
7430 /* Disable interrupt handling until HW is initialized */
7431 atomic_set(&bp->intr_sem, 1);
7432
34f80b04 7433 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7434
34f80b04
EG
7435 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7436 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7437
7438 rc = bnx2x_get_hwinfo(bp);
7439
7440 /* need to reset chip if undi was active */
7441 if (!BP_NOMCP(bp))
7442 bnx2x_undi_unload(bp);
7443
7444 if (CHIP_REV_IS_FPGA(bp))
7445 printk(KERN_ERR PFX "FPGA detected\n");
7446
7447 if (BP_NOMCP(bp) && (func == 0))
7448 printk(KERN_ERR PFX
7449 "MCP disabled, must load devices in order!\n");
7450
7a9b2557
VZ
7451 /* Set TPA flags */
7452 if (disable_tpa) {
7453 bp->flags &= ~TPA_ENABLE_FLAG;
7454 bp->dev->features &= ~NETIF_F_LRO;
7455 } else {
7456 bp->flags |= TPA_ENABLE_FLAG;
7457 bp->dev->features |= NETIF_F_LRO;
7458 }
7459
7460
34f80b04
EG
7461 bp->tx_ring_size = MAX_TX_AVAIL;
7462 bp->rx_ring_size = MAX_RX_AVAIL;
7463
7464 bp->rx_csum = 1;
7465 bp->rx_offset = 0;
7466
7467 bp->tx_ticks = 50;
7468 bp->rx_ticks = 25;
7469
34f80b04
EG
7470 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7471 bp->current_interval = (poll ? poll : bp->timer_interval);
7472
7473 init_timer(&bp->timer);
7474 bp->timer.expires = jiffies + bp->current_interval;
7475 bp->timer.data = (unsigned long) bp;
7476 bp->timer.function = bnx2x_timer;
7477
7478 return rc;
a2fbb9ea
ET
7479}
7480
7481/*
7482 * ethtool service functions
7483 */
7484
7485/* All ethtool functions called with rtnl_lock */
7486
7487static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7488{
7489 struct bnx2x *bp = netdev_priv(dev);
7490
34f80b04
EG
7491 cmd->supported = bp->port.supported;
7492 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7493
7494 if (netif_carrier_ok(dev)) {
c18487ee
YR
7495 cmd->speed = bp->link_vars.line_speed;
7496 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7497 } else {
c18487ee
YR
7498 cmd->speed = bp->link_params.req_line_speed;
7499 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7500 }
34f80b04
EG
7501 if (IS_E1HMF(bp)) {
7502 u16 vn_max_rate;
7503
7504 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7505 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7506 if (vn_max_rate < cmd->speed)
7507 cmd->speed = vn_max_rate;
7508 }
a2fbb9ea 7509
c18487ee
YR
7510 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7511 u32 ext_phy_type =
7512 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7513
7514 switch (ext_phy_type) {
7515 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7516 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7517 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7519 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7520 cmd->port = PORT_FIBRE;
7521 break;
7522
7523 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7524 cmd->port = PORT_TP;
7525 break;
7526
c18487ee
YR
7527 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7528 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7529 bp->link_params.ext_phy_config);
7530 break;
7531
f1410647
ET
7532 default:
7533 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7534 bp->link_params.ext_phy_config);
7535 break;
f1410647
ET
7536 }
7537 } else
a2fbb9ea 7538 cmd->port = PORT_TP;
a2fbb9ea 7539
34f80b04 7540 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7541 cmd->transceiver = XCVR_INTERNAL;
7542
c18487ee 7543 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7544 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7545 else
a2fbb9ea 7546 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7547
7548 cmd->maxtxpkt = 0;
7549 cmd->maxrxpkt = 0;
7550
7551 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7552 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7553 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7554 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7555 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7556 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7557 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7558
7559 return 0;
7560}
7561
7562static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7563{
7564 struct bnx2x *bp = netdev_priv(dev);
7565 u32 advertising;
7566
34f80b04
EG
7567 if (IS_E1HMF(bp))
7568 return 0;
7569
a2fbb9ea
ET
7570 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7571 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7572 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7573 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7574 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7575 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7576 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7577
a2fbb9ea 7578 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7579 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7580 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7581 return -EINVAL;
f1410647 7582 }
a2fbb9ea
ET
7583
7584 /* advertise the requested speed and duplex if supported */
34f80b04 7585 cmd->advertising &= bp->port.supported;
a2fbb9ea 7586
c18487ee
YR
7587 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7588 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7589 bp->port.advertising |= (ADVERTISED_Autoneg |
7590 cmd->advertising);
a2fbb9ea
ET
7591
7592 } else { /* forced speed */
7593 /* advertise the requested speed and duplex if supported */
7594 switch (cmd->speed) {
7595 case SPEED_10:
7596 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7597 if (!(bp->port.supported &
f1410647
ET
7598 SUPPORTED_10baseT_Full)) {
7599 DP(NETIF_MSG_LINK,
7600 "10M full not supported\n");
a2fbb9ea 7601 return -EINVAL;
f1410647 7602 }
a2fbb9ea
ET
7603
7604 advertising = (ADVERTISED_10baseT_Full |
7605 ADVERTISED_TP);
7606 } else {
34f80b04 7607 if (!(bp->port.supported &
f1410647
ET
7608 SUPPORTED_10baseT_Half)) {
7609 DP(NETIF_MSG_LINK,
7610 "10M half not supported\n");
a2fbb9ea 7611 return -EINVAL;
f1410647 7612 }
a2fbb9ea
ET
7613
7614 advertising = (ADVERTISED_10baseT_Half |
7615 ADVERTISED_TP);
7616 }
7617 break;
7618
7619 case SPEED_100:
7620 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7621 if (!(bp->port.supported &
f1410647
ET
7622 SUPPORTED_100baseT_Full)) {
7623 DP(NETIF_MSG_LINK,
7624 "100M full not supported\n");
a2fbb9ea 7625 return -EINVAL;
f1410647 7626 }
a2fbb9ea
ET
7627
7628 advertising = (ADVERTISED_100baseT_Full |
7629 ADVERTISED_TP);
7630 } else {
34f80b04 7631 if (!(bp->port.supported &
f1410647
ET
7632 SUPPORTED_100baseT_Half)) {
7633 DP(NETIF_MSG_LINK,
7634 "100M half not supported\n");
a2fbb9ea 7635 return -EINVAL;
f1410647 7636 }
a2fbb9ea
ET
7637
7638 advertising = (ADVERTISED_100baseT_Half |
7639 ADVERTISED_TP);
7640 }
7641 break;
7642
7643 case SPEED_1000:
f1410647
ET
7644 if (cmd->duplex != DUPLEX_FULL) {
7645 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7646 return -EINVAL;
f1410647 7647 }
a2fbb9ea 7648
34f80b04 7649 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7650 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7651 return -EINVAL;
f1410647 7652 }
a2fbb9ea
ET
7653
7654 advertising = (ADVERTISED_1000baseT_Full |
7655 ADVERTISED_TP);
7656 break;
7657
7658 case SPEED_2500:
f1410647
ET
7659 if (cmd->duplex != DUPLEX_FULL) {
7660 DP(NETIF_MSG_LINK,
7661 "2.5G half not supported\n");
a2fbb9ea 7662 return -EINVAL;
f1410647 7663 }
a2fbb9ea 7664
34f80b04 7665 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7666 DP(NETIF_MSG_LINK,
7667 "2.5G full not supported\n");
a2fbb9ea 7668 return -EINVAL;
f1410647 7669 }
a2fbb9ea 7670
f1410647 7671 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7672 ADVERTISED_TP);
7673 break;
7674
7675 case SPEED_10000:
f1410647
ET
7676 if (cmd->duplex != DUPLEX_FULL) {
7677 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7678 return -EINVAL;
f1410647 7679 }
a2fbb9ea 7680
34f80b04 7681 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7682 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7683 return -EINVAL;
f1410647 7684 }
a2fbb9ea
ET
7685
7686 advertising = (ADVERTISED_10000baseT_Full |
7687 ADVERTISED_FIBRE);
7688 break;
7689
7690 default:
f1410647 7691 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7692 return -EINVAL;
7693 }
7694
c18487ee
YR
7695 bp->link_params.req_line_speed = cmd->speed;
7696 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7697 bp->port.advertising = advertising;
a2fbb9ea
ET
7698 }
7699
c18487ee 7700 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7701 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7702 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7703 bp->port.advertising);
a2fbb9ea 7704
34f80b04 7705 if (netif_running(dev)) {
bb2a0f7a 7706 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7707 bnx2x_link_set(bp);
7708 }
a2fbb9ea
ET
7709
7710 return 0;
7711}
7712
c18487ee
YR
7713#define PHY_FW_VER_LEN 10
7714
a2fbb9ea
ET
7715static void bnx2x_get_drvinfo(struct net_device *dev,
7716 struct ethtool_drvinfo *info)
7717{
7718 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7719 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7720
7721 strcpy(info->driver, DRV_MODULE_NAME);
7722 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7723
7724 phy_fw_ver[0] = '\0';
34f80b04 7725 if (bp->port.pmf) {
4a37fb66 7726 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7727 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7728 (bp->state != BNX2X_STATE_CLOSED),
7729 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7730 bnx2x_release_phy_lock(bp);
34f80b04 7731 }
c18487ee
YR
7732
7733 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7734 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7735 BCM_5710_FW_REVISION_VERSION,
34f80b04 7736 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7737 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
7738 strcpy(info->bus_info, pci_name(bp->pdev));
7739 info->n_stats = BNX2X_NUM_STATS;
7740 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7741 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7742 info->regdump_len = 0;
7743}
7744
7745static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7746{
7747 struct bnx2x *bp = netdev_priv(dev);
7748
7749 if (bp->flags & NO_WOL_FLAG) {
7750 wol->supported = 0;
7751 wol->wolopts = 0;
7752 } else {
7753 wol->supported = WAKE_MAGIC;
7754 if (bp->wol)
7755 wol->wolopts = WAKE_MAGIC;
7756 else
7757 wol->wolopts = 0;
7758 }
7759 memset(&wol->sopass, 0, sizeof(wol->sopass));
7760}
7761
7762static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7763{
7764 struct bnx2x *bp = netdev_priv(dev);
7765
7766 if (wol->wolopts & ~WAKE_MAGIC)
7767 return -EINVAL;
7768
7769 if (wol->wolopts & WAKE_MAGIC) {
7770 if (bp->flags & NO_WOL_FLAG)
7771 return -EINVAL;
7772
7773 bp->wol = 1;
34f80b04 7774 } else
a2fbb9ea 7775 bp->wol = 0;
34f80b04 7776
a2fbb9ea
ET
7777 return 0;
7778}
7779
7780static u32 bnx2x_get_msglevel(struct net_device *dev)
7781{
7782 struct bnx2x *bp = netdev_priv(dev);
7783
7784 return bp->msglevel;
7785}
7786
7787static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7788{
7789 struct bnx2x *bp = netdev_priv(dev);
7790
7791 if (capable(CAP_NET_ADMIN))
7792 bp->msglevel = level;
7793}
7794
7795static int bnx2x_nway_reset(struct net_device *dev)
7796{
7797 struct bnx2x *bp = netdev_priv(dev);
7798
34f80b04
EG
7799 if (!bp->port.pmf)
7800 return 0;
a2fbb9ea 7801
34f80b04 7802 if (netif_running(dev)) {
bb2a0f7a 7803 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7804 bnx2x_link_set(bp);
7805 }
a2fbb9ea
ET
7806
7807 return 0;
7808}
7809
7810static int bnx2x_get_eeprom_len(struct net_device *dev)
7811{
7812 struct bnx2x *bp = netdev_priv(dev);
7813
34f80b04 7814 return bp->common.flash_size;
a2fbb9ea
ET
7815}
7816
7817static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7818{
34f80b04 7819 int port = BP_PORT(bp);
a2fbb9ea
ET
7820 int count, i;
7821 u32 val = 0;
7822
7823 /* adjust timeout for emulation/FPGA */
7824 count = NVRAM_TIMEOUT_COUNT;
7825 if (CHIP_REV_IS_SLOW(bp))
7826 count *= 100;
7827
7828 /* request access to nvram interface */
7829 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7830 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7831
7832 for (i = 0; i < count*10; i++) {
7833 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7834 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7835 break;
7836
7837 udelay(5);
7838 }
7839
7840 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7841 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7842 return -EBUSY;
7843 }
7844
7845 return 0;
7846}
7847
7848static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7849{
34f80b04 7850 int port = BP_PORT(bp);
a2fbb9ea
ET
7851 int count, i;
7852 u32 val = 0;
7853
7854 /* adjust timeout for emulation/FPGA */
7855 count = NVRAM_TIMEOUT_COUNT;
7856 if (CHIP_REV_IS_SLOW(bp))
7857 count *= 100;
7858
7859 /* relinquish nvram interface */
7860 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7861 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7862
7863 for (i = 0; i < count*10; i++) {
7864 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7865 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7866 break;
7867
7868 udelay(5);
7869 }
7870
7871 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7872 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7873 return -EBUSY;
7874 }
7875
7876 return 0;
7877}
7878
7879static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7880{
7881 u32 val;
7882
7883 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7884
7885 /* enable both bits, even on read */
7886 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7887 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7888 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7889}
7890
7891static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7892{
7893 u32 val;
7894
7895 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7896
7897 /* disable both bits, even after read */
7898 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7899 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7900 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7901}
7902
7903static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7904 u32 cmd_flags)
7905{
f1410647 7906 int count, i, rc;
a2fbb9ea
ET
7907 u32 val;
7908
7909 /* build the command word */
7910 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7911
7912 /* need to clear DONE bit separately */
7913 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7914
7915 /* address of the NVRAM to read from */
7916 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7917 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7918
7919 /* issue a read command */
7920 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7921
7922 /* adjust timeout for emulation/FPGA */
7923 count = NVRAM_TIMEOUT_COUNT;
7924 if (CHIP_REV_IS_SLOW(bp))
7925 count *= 100;
7926
7927 /* wait for completion */
7928 *ret_val = 0;
7929 rc = -EBUSY;
7930 for (i = 0; i < count; i++) {
7931 udelay(5);
7932 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7933
7934 if (val & MCPR_NVM_COMMAND_DONE) {
7935 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
7936 /* we read nvram data in cpu order,
7937 * but ethtool sees it as an array of bytes;
7938 * converting to big-endian will do the work */
7939 val = cpu_to_be32(val);
7940 *ret_val = val;
7941 rc = 0;
7942 break;
7943 }
7944 }
7945
7946 return rc;
7947}
7948
7949static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7950 int buf_size)
7951{
7952 int rc;
7953 u32 cmd_flags;
7954 u32 val;
7955
7956 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7957 DP(BNX2X_MSG_NVM,
c14423fe 7958 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7959 offset, buf_size);
7960 return -EINVAL;
7961 }
7962
34f80b04
EG
7963 if (offset + buf_size > bp->common.flash_size) {
7964 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7965 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7966 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7967 return -EINVAL;
7968 }
7969
7970 /* request access to nvram interface */
7971 rc = bnx2x_acquire_nvram_lock(bp);
7972 if (rc)
7973 return rc;
7974
7975 /* enable access to nvram interface */
7976 bnx2x_enable_nvram_access(bp);
7977
7978 /* read the first word(s) */
7979 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7980 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7981 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7982 memcpy(ret_buf, &val, 4);
7983
7984 /* advance to the next dword */
7985 offset += sizeof(u32);
7986 ret_buf += sizeof(u32);
7987 buf_size -= sizeof(u32);
7988 cmd_flags = 0;
7989 }
7990
7991 if (rc == 0) {
7992 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7993 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7994 memcpy(ret_buf, &val, 4);
7995 }
7996
7997 /* disable access to nvram interface */
7998 bnx2x_disable_nvram_access(bp);
7999 bnx2x_release_nvram_lock(bp);
8000
8001 return rc;
8002}
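
A sketch of the command-flag sequencing bnx2x_nvram_read() uses: the first dword of a burst carries MCPR_NVM_COMMAND_FIRST, the final one MCPR_NVM_COMMAND_LAST, and intermediate dwords no flag, presumably so the flash controller treats the whole transfer as one access. This is illustrative only, and the flag values below are stand-ins, not the real register bits:

#include <stdio.h>

#define CMD_FIRST 0x01	/* stand-in for MCPR_NVM_COMMAND_FIRST */
#define CMD_LAST  0x02	/* stand-in for MCPR_NVM_COMMAND_LAST */

int main(void)
{
	int buf_size = 12;		/* three dwords */
	unsigned int cmd_flags = CMD_FIRST;

	/* same loop structure as bnx2x_nvram_read(); a single-dword
	 * read skips the loop and gets FIRST|LAST on one command */
	while (buf_size > 4) {
		printf("read dword, flags 0x%x\n", cmd_flags);
		buf_size -= 4;
		cmd_flags = 0;
	}
	printf("read dword, flags 0x%x\n", cmd_flags | CMD_LAST);
	return 0;
}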
8003
8004static int bnx2x_get_eeprom(struct net_device *dev,
8005 struct ethtool_eeprom *eeprom, u8 *eebuf)
8006{
8007 struct bnx2x *bp = netdev_priv(dev);
8008 int rc;
8009
34f80b04 8010 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8011 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8012 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8013 eeprom->len, eeprom->len);
8014
8015 /* parameters already validated in ethtool_get_eeprom */
8016
8017 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8018
8019 return rc;
8020}
8021
8022static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8023 u32 cmd_flags)
8024{
f1410647 8025 int count, i, rc;
a2fbb9ea
ET
8026
8027 /* build the command word */
8028 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8029
8030 /* need to clear DONE bit separately */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8032
8033 /* write the data */
8034 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8035
8036 /* address of the NVRAM to write to */
8037 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8038 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8039
8040 /* issue the write command */
8041 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8042
8043 /* adjust timeout for emulation/FPGA */
8044 count = NVRAM_TIMEOUT_COUNT;
8045 if (CHIP_REV_IS_SLOW(bp))
8046 count *= 100;
8047
8048 /* wait for completion */
8049 rc = -EBUSY;
8050 for (i = 0; i < count; i++) {
8051 udelay(5);
8052 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8053 if (val & MCPR_NVM_COMMAND_DONE) {
8054 rc = 0;
8055 break;
8056 }
8057 }
8058
8059 return rc;
8060}
8061
f1410647 8062#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8063
8064static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8065 int buf_size)
8066{
8067 int rc;
8068 u32 cmd_flags;
8069 u32 align_offset;
8070 u32 val;
8071
34f80b04
EG
8072 if (offset + buf_size > bp->common.flash_size) {
8073 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8074 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8075 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8076 return -EINVAL;
8077 }
8078
8079 /* request access to nvram interface */
8080 rc = bnx2x_acquire_nvram_lock(bp);
8081 if (rc)
8082 return rc;
8083
8084 /* enable access to nvram interface */
8085 bnx2x_enable_nvram_access(bp);
8086
8087 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8088 align_offset = (offset & ~0x03);
8089 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8090
8091 if (rc == 0) {
8092 val &= ~(0xff << BYTE_OFFSET(offset));
8093 val |= (*data_buf << BYTE_OFFSET(offset));
8094
8095 /* nvram data is returned as an array of bytes;
8096 * convert it back to cpu order */
8097 val = be32_to_cpu(val);
8098
a2fbb9ea
ET
8099 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8100 cmd_flags);
8101 }
8102
8103 /* disable access to nvram interface */
8104 bnx2x_disable_nvram_access(bp);
8105 bnx2x_release_nvram_lock(bp);
8106
8107 return rc;
8108}
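
The single-byte write above is a read-modify-write on the containing dword; BYTE_OFFSET() converts the low two offset bits into a shift. A worked example of just the arithmetic (which byte lane that shift hits in flash depends on the be32/cpu conversions above):

#include <stdint.h>
#include <stdio.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

int main(void)
{
	uint32_t val = 0x11223344;	/* dword read back from offset 0x100 */
	uint32_t offset = 0x103;	/* byte 3 within that dword */
	uint8_t data = 0xab;

	val &= ~(0xffu << BYTE_OFFSET(offset));		/* clear the old byte */
	val |= (uint32_t)data << BYTE_OFFSET(offset);	/* insert the new one */

	printf("0x%08x\n", val);	/* prints 0xab223344 */
	return 0;
}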
8109
8110static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8111 int buf_size)
8112{
8113 int rc;
8114 u32 cmd_flags;
8115 u32 val;
8116 u32 written_so_far;
8117
34f80b04 8118 if (buf_size == 1) /* ethtool */
a2fbb9ea 8119 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8120
8121 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8122 DP(BNX2X_MSG_NVM,
c14423fe 8123 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8124 offset, buf_size);
8125 return -EINVAL;
8126 }
8127
34f80b04
EG
8128 if (offset + buf_size > bp->common.flash_size) {
8129 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8130 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8131 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8132 return -EINVAL;
8133 }
8134
8135 /* request access to nvram interface */
8136 rc = bnx2x_acquire_nvram_lock(bp);
8137 if (rc)
8138 return rc;
8139
8140 /* enable access to nvram interface */
8141 bnx2x_enable_nvram_access(bp);
8142
8143 written_so_far = 0;
8144 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8145 while ((written_so_far < buf_size) && (rc == 0)) {
8146 if (written_so_far == (buf_size - sizeof(u32)))
8147 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8148 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8149 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8150 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8151 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8152
8153 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8154
8155 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8156
8157 /* advance to the next dword */
8158 offset += sizeof(u32);
8159 data_buf += sizeof(u32);
8160 written_so_far += sizeof(u32);
8161 cmd_flags = 0;
8162 }
8163
8164 /* disable access to nvram interface */
8165 bnx2x_disable_nvram_access(bp);
8166 bnx2x_release_nvram_lock(bp);
8167
8168 return rc;
8169}
8170
8171static int bnx2x_set_eeprom(struct net_device *dev,
8172 struct ethtool_eeprom *eeprom, u8 *eebuf)
8173{
8174 struct bnx2x *bp = netdev_priv(dev);
8175 int rc;
8176
34f80b04 8177 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8178 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8179 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8180 eeprom->len, eeprom->len);
8181
8182 /* parameters already validated in ethtool_set_eeprom */
8183
c18487ee 8184 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8185 if (eeprom->magic == 0x00504859)
8186 if (bp->port.pmf) {
8187
4a37fb66 8188 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8189 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8190 bp->link_params.ext_phy_config,
8191 (bp->state != BNX2X_STATE_CLOSED),
8192 eebuf, eeprom->len);
bb2a0f7a
YG
8193 if ((bp->state == BNX2X_STATE_OPEN) ||
8194 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8195 rc |= bnx2x_link_reset(&bp->link_params,
8196 &bp->link_vars);
8197 rc |= bnx2x_phy_init(&bp->link_params,
8198 &bp->link_vars);
bb2a0f7a 8199 }
4a37fb66 8200 bnx2x_release_phy_lock(bp);
34f80b04
EG
8201
8202 } else /* Only the PMF can access the PHY */
8203 return -EINVAL;
8204 else
c18487ee 8205 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8206
8207 return rc;
8208}
8209
8210static int bnx2x_get_coalesce(struct net_device *dev,
8211 struct ethtool_coalesce *coal)
8212{
8213 struct bnx2x *bp = netdev_priv(dev);
8214
8215 memset(coal, 0, sizeof(struct ethtool_coalesce));
8216
8217 coal->rx_coalesce_usecs = bp->rx_ticks;
8218 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8219
8220 return 0;
8221}
8222
8223static int bnx2x_set_coalesce(struct net_device *dev,
8224 struct ethtool_coalesce *coal)
8225{
8226 struct bnx2x *bp = netdev_priv(dev);
8227
8228 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8229 if (bp->rx_ticks > 3000)
8230 bp->rx_ticks = 3000;
8231
8232 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8233 if (bp->tx_ticks > 0x3000)
8234 bp->tx_ticks = 0x3000;
8235
34f80b04 8236 if (netif_running(dev))
a2fbb9ea
ET
8237 bnx2x_update_coalesce(bp);
8238
8239 return 0;
8240}
8241
7a9b2557
VZ
8242static int bnx2x_set_flags(struct net_device *dev, u32 data)
8243{
8244 struct bnx2x *bp = netdev_priv(dev);
8245 int changed = 0;
8246 int rc = 0;
8247
8248 if (data & ETH_FLAG_LRO) {
8249 if (!(dev->features & NETIF_F_LRO)) {
8250 dev->features |= NETIF_F_LRO;
8251 bp->flags |= TPA_ENABLE_FLAG;
8252 changed = 1;
8253 }
8254
8255 } else if (dev->features & NETIF_F_LRO) {
8256 dev->features &= ~NETIF_F_LRO;
8257 bp->flags &= ~TPA_ENABLE_FLAG;
8258 changed = 1;
8259 }
8260
8261 if (changed && netif_running(dev)) {
8262 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8263 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8264 }
8265
8266 return rc;
8267}
8268
a2fbb9ea
ET
8269static void bnx2x_get_ringparam(struct net_device *dev,
8270 struct ethtool_ringparam *ering)
8271{
8272 struct bnx2x *bp = netdev_priv(dev);
8273
8274 ering->rx_max_pending = MAX_RX_AVAIL;
8275 ering->rx_mini_max_pending = 0;
8276 ering->rx_jumbo_max_pending = 0;
8277
8278 ering->rx_pending = bp->rx_ring_size;
8279 ering->rx_mini_pending = 0;
8280 ering->rx_jumbo_pending = 0;
8281
8282 ering->tx_max_pending = MAX_TX_AVAIL;
8283 ering->tx_pending = bp->tx_ring_size;
8284}
8285
8286static int bnx2x_set_ringparam(struct net_device *dev,
8287 struct ethtool_ringparam *ering)
8288{
8289 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8290 int rc = 0;
a2fbb9ea
ET
8291
8292 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8293 (ering->tx_pending > MAX_TX_AVAIL) ||
8294 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8295 return -EINVAL;
8296
8297 bp->rx_ring_size = ering->rx_pending;
8298 bp->tx_ring_size = ering->tx_pending;
8299
34f80b04
EG
8300 if (netif_running(dev)) {
8301 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8302 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8303 }
8304
34f80b04 8305 return rc;
a2fbb9ea
ET
8306}
8307
8308static void bnx2x_get_pauseparam(struct net_device *dev,
8309 struct ethtool_pauseparam *epause)
8310{
8311 struct bnx2x *bp = netdev_priv(dev);
8312
c18487ee
YR
8313 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8314 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8315
8316 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8317 FLOW_CTRL_RX);
8318 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8319 FLOW_CTRL_TX);
a2fbb9ea
ET
8320
8321 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8322 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8323 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8324}
8325
8326static int bnx2x_set_pauseparam(struct net_device *dev,
8327 struct ethtool_pauseparam *epause)
8328{
8329 struct bnx2x *bp = netdev_priv(dev);
8330
34f80b04
EG
8331 if (IS_E1HMF(bp))
8332 return 0;
8333
a2fbb9ea
ET
8334 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8335 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8336 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8337
c18487ee 8338 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8339
f1410647 8340 if (epause->rx_pause)
c18487ee
YR
8341 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8342
f1410647 8343 if (epause->tx_pause)
c18487ee
YR
8344 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8345
8346 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8347 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8348
c18487ee 8349 if (epause->autoneg) {
34f80b04 8350 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8351 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8352 return -EINVAL;
8353 }
a2fbb9ea 8354
c18487ee
YR
8355 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8356 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8357 }
a2fbb9ea 8358
c18487ee
YR
8359 DP(NETIF_MSG_LINK,
8360 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8361
8362 if (netif_running(dev)) {
bb2a0f7a 8363 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8364 bnx2x_link_set(bp);
8365 }
a2fbb9ea
ET
8366
8367 return 0;
8368}
8369
8370static u32 bnx2x_get_rx_csum(struct net_device *dev)
8371{
8372 struct bnx2x *bp = netdev_priv(dev);
8373
8374 return bp->rx_csum;
8375}
8376
8377static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8378{
8379 struct bnx2x *bp = netdev_priv(dev);
8380
8381 bp->rx_csum = data;
8382 return 0;
8383}
8384
8385static int bnx2x_set_tso(struct net_device *dev, u32 data)
8386{
755735eb 8387 if (data) {
a2fbb9ea 8388 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8389 dev->features |= NETIF_F_TSO6;
8390 } else {
a2fbb9ea 8391 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8392 dev->features &= ~NETIF_F_TSO6;
8393 }
8394
a2fbb9ea
ET
8395 return 0;
8396}
8397
f3c87cdd 8398static const struct {
a2fbb9ea
ET
8399 char string[ETH_GSTRING_LEN];
8400} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8401 { "register_test (offline)" },
8402 { "memory_test (offline)" },
8403 { "loopback_test (offline)" },
8404 { "nvram_test (online)" },
8405 { "interrupt_test (online)" },
8406 { "link_test (online)" },
8407 { "idle check (online)" },
8408 { "MC errors (online)" }
a2fbb9ea
ET
8409};
8410
8411static int bnx2x_self_test_count(struct net_device *dev)
8412{
8413 return BNX2X_NUM_TESTS;
8414}
8415
f3c87cdd
YG
8416static int bnx2x_test_registers(struct bnx2x *bp)
8417{
8418 int idx, i, rc = -ENODEV;
8419 u32 wr_val = 0;
8420 static const struct {
8421 u32 offset0;
8422 u32 offset1;
8423 u32 mask;
8424 } reg_tbl[] = {
8425/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8426 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8427 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8428 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8429 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8430 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8431 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8432 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8433 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8434 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8435/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8436 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8437 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8438 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8439 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8440 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8441 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8442 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8443 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8444 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8445/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8446 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8447 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8448 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8449 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8450 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8451 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8452 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8453 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8454 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8455/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8456 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8457 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8458 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8459 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8460 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8461 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8462 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8463
8464 { 0xffffffff, 0, 0x00000000 }
8465 };
8466
8467 if (!netif_running(bp->dev))
8468 return rc;
8469
8470 /* Repeat the test twice:
8471 First by writing 0x00000000, second by writing 0xffffffff */
8472 for (idx = 0; idx < 2; idx++) {
8473
8474 switch (idx) {
8475 case 0:
8476 wr_val = 0;
8477 break;
8478 case 1:
8479 wr_val = 0xffffffff;
8480 break;
8481 }
8482
8483 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8484 u32 offset, mask, save_val, val;
8485 int port = BP_PORT(bp);
8486
8487 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8488 mask = reg_tbl[i].mask;
8489
8490 save_val = REG_RD(bp, offset);
8491
8492 REG_WR(bp, offset, wr_val);
8493 val = REG_RD(bp, offset);
8494
8495 /* Restore the original register's value */
8496 REG_WR(bp, offset, save_val);
8497
8498 /* verify that the value read back is as expected */
8499 if ((val & mask) != (wr_val & mask))
8500 goto test_reg_exit;
8501 }
8502 }
8503
8504 rc = 0;
8505
8506test_reg_exit:
8507 return rc;
8508}
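
The register test above follows a save/write/read-back/restore pattern, comparing only the bits the mask marks as implemented. A condensed sketch of the same idea against a toy register file; reg_read()/reg_write() are hypothetical stand-ins for REG_RD()/REG_WR():

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* toy register file for the demo */

static uint32_t reg_read(uint32_t off)		{ return regs[off]; }
static void reg_write(uint32_t off, uint32_t v)	{ regs[off] = v; }

static int test_register(uint32_t off, uint32_t mask, uint32_t wr_val)
{
	uint32_t save = reg_read(off);
	uint32_t val;

	reg_write(off, wr_val);
	val = reg_read(off);
	reg_write(off, save);		/* always restore the original */

	return ((val & mask) == (wr_val & mask)) ? 0 : -1;
}

int main(void)
{
	/* run the same 0x00000000 / 0xffffffff passes as the driver */
	printf("%d\n", test_register(3, 0x000003ff, 0x00000000));
	printf("%d\n", test_register(3, 0x000003ff, 0xffffffff));
	return 0;
}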
8509
8510static int bnx2x_test_memory(struct bnx2x *bp)
8511{
8512 int i, j, rc = -ENODEV;
8513 u32 val;
8514 static const struct {
8515 u32 offset;
8516 int size;
8517 } mem_tbl[] = {
8518 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8519 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8520 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8521 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8522 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8523 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8524 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8525
8526 { 0xffffffff, 0 }
8527 };
8528 static const struct {
8529 char *name;
8530 u32 offset;
8531 u32 mask;
8532 } prty_tbl[] = {
8533 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8534 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8535 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8536 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8537 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8538 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8539
8540 { NULL, 0xffffffff, 0 }
8541 };
8542
8543 if (!netif_running(bp->dev))
8544 return rc;
8545
8546 /* Go through all the memories */
8547 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8548 for (j = 0; j < mem_tbl[i].size; j++)
8549 REG_RD(bp, mem_tbl[i].offset + j*4);
8550
8551 /* Check the parity status */
8552 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8553 val = REG_RD(bp, prty_tbl[i].offset);
8554 if (val & ~(prty_tbl[i].mask)) {
8555 DP(NETIF_MSG_HW,
8556 "%s is 0x%x\n", prty_tbl[i].name, val);
8557 goto test_mem_exit;
8558 }
8559 }
8560
8561 rc = 0;
8562
8563test_mem_exit:
8564 return rc;
8565}
8566
8567static void bnx2x_netif_start(struct bnx2x *bp)
8568{
8569 int i;
8570
8571 if (atomic_dec_and_test(&bp->intr_sem)) {
8572 if (netif_running(bp->dev)) {
8573 bnx2x_int_enable(bp);
8574 for_each_queue(bp, i)
8575 napi_enable(&bnx2x_fp(bp, i, napi));
8576 if (bp->state == BNX2X_STATE_OPEN)
8577 netif_wake_queue(bp->dev);
8578 }
8579 }
8580}
8581
8582static void bnx2x_netif_stop(struct bnx2x *bp)
8583{
8584 int i;
8585
8586 if (netif_running(bp->dev)) {
8587 netif_tx_disable(bp->dev);
8588 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8589 for_each_queue(bp, i)
8590 napi_disable(&bnx2x_fp(bp, i, napi));
8591 }
8592 bnx2x_int_disable_sync(bp);
8593}
8594
8595static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8596{
8597 int cnt = 1000;
8598
8599 if (link_up)
8600 while (bnx2x_link_test(bp) && cnt--)
8601 msleep(10);
8602}
8603
8604static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8605{
8606 unsigned int pkt_size, num_pkts, i;
8607 struct sk_buff *skb;
8608 unsigned char *packet;
8609 struct bnx2x_fastpath *fp = &bp->fp[0];
8610 u16 tx_start_idx, tx_idx;
8611 u16 rx_start_idx, rx_idx;
8612 u16 pkt_prod;
8613 struct sw_tx_bd *tx_buf;
8614 struct eth_tx_bd *tx_bd;
8615 dma_addr_t mapping;
8616 union eth_rx_cqe *cqe;
8617 u8 cqe_fp_flags;
8618 struct sw_rx_bd *rx_buf;
8619 u16 len;
8620 int rc = -ENODEV;
8621
8622 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8623 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8624 		bnx2x_acquire_phy_lock(bp);
8625 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8626 		bnx2x_release_phy_lock(bp);
8627
8628 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8629 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8630 		bnx2x_acquire_phy_lock(bp);
8631 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8632 		bnx2x_release_phy_lock(bp);
8633 /* wait until link state is restored */
8634 bnx2x_wait_for_link(bp, link_up);
8635
8636 } else
8637 return -EINVAL;
8638
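	/* Loopback procedure: send one self-addressed 1514-byte frame
	 * filled with an incrementing byte pattern, then verify that
	 * exactly one packet was both transmitted and received and that
	 * the payload survived intact.
	 */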
8639 pkt_size = 1514;
8640 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8641 if (!skb) {
8642 rc = -ENOMEM;
8643 goto test_loopback_exit;
8644 }
8645 packet = skb_put(skb, pkt_size);
8646 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8647 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8648 for (i = ETH_HLEN; i < pkt_size; i++)
8649 packet[i] = (unsigned char) (i & 0xff);
8650
8651 num_pkts = 0;
8652 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8653 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8654
8655 pkt_prod = fp->tx_pkt_prod++;
8656 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8657 tx_buf->first_bd = fp->tx_bd_prod;
8658 tx_buf->skb = skb;
8659
8660 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8661 mapping = pci_map_single(bp->pdev, skb->data,
8662 skb_headlen(skb), PCI_DMA_TODEVICE);
8663 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8664 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8665 tx_bd->nbd = cpu_to_le16(1);
8666 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8667 tx_bd->vlan = cpu_to_le16(pkt_prod);
8668 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8669 ETH_TX_BD_FLAGS_END_BD);
8670 tx_bd->general_data = ((UNICAST_ADDRESS <<
8671 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8672
8673 fp->hw_tx_prods->bds_prod =
8674 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8675 mb(); /* FW restriction: must not reorder writing nbd and packets */
8676 fp->hw_tx_prods->packets_prod =
8677 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8678 DOORBELL(bp, FP_IDX(fp), 0);
8679
8680 mmiowb();
8681
8682 num_pkts++;
8683 fp->tx_bd_prod++;
8684 bp->dev->trans_start = jiffies;
8685
8686 udelay(100);
8687
8688 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8689 if (tx_idx != tx_start_idx + num_pkts)
8690 goto test_loopback_exit;
8691
8692 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8693 if (rx_idx != rx_start_idx + num_pkts)
8694 goto test_loopback_exit;
8695
8696 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8697 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8698 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8699 goto test_loopback_rx_exit;
8700
8701 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8702 if (len != pkt_size)
8703 goto test_loopback_rx_exit;
8704
8705 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8706 skb = rx_buf->skb;
8707 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8708 for (i = ETH_HLEN; i < pkt_size; i++)
8709 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8710 goto test_loopback_rx_exit;
8711
8712 rc = 0;
8713
8714test_loopback_rx_exit:
8715 bp->dev->last_rx = jiffies;
8716
8717 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8718 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8719 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8720 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8721
8722 /* Update producers */
8723 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8724 fp->rx_sge_prod);
8725 mmiowb(); /* keep prod updates ordered */
8726
8727test_loopback_exit:
8728 bp->link_params.loopback_mode = LOOPBACK_NONE;
8729
8730 return rc;
8731}
8732
8733static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8734{
8735 int rc = 0;
8736
8737 if (!netif_running(bp->dev))
8738 return BNX2X_LOOPBACK_FAILED;
8739
8740 bnx2x_netif_stop(bp);
8741
8742 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8743 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8744 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8745 }
8746
8747 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8748 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8749 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8750 }
8751
8752 bnx2x_netif_start(bp);
8753
8754 return rc;
8755}
8756
8757#define CRC32_RESIDUAL 0xdebb20e3
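/* Each NVRAM region below stores its own CRC32 at the end, so running
 * ether_crc_le() over the whole region (data plus stored CRC) must
 * yield this constant residual when the contents are intact.
 */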
8758
8759static int bnx2x_test_nvram(struct bnx2x *bp)
8760{
8761 static const struct {
8762 int offset;
8763 int size;
8764 } nvram_tbl[] = {
8765 { 0, 0x14 }, /* bootstrap */
8766 { 0x14, 0xec }, /* dir */
8767 { 0x100, 0x350 }, /* manuf_info */
8768 { 0x450, 0xf0 }, /* feature_info */
8769 { 0x640, 0x64 }, /* upgrade_key_info */
8770 { 0x6a4, 0x64 },
8771 { 0x708, 0x70 }, /* manuf_key_info */
8772 { 0x778, 0x70 },
8773 { 0, 0 }
8774 };
8775 u32 buf[0x350 / 4];
8776 u8 *data = (u8 *)buf;
8777 int i, rc;
8778 u32 magic, csum;
8779
8780 rc = bnx2x_nvram_read(bp, 0, data, 4);
8781 if (rc) {
8782 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8783 goto test_nvram_exit;
8784 }
8785
8786 magic = be32_to_cpu(buf[0]);
8787 if (magic != 0x669955aa) {
8788 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8789 rc = -ENODEV;
8790 goto test_nvram_exit;
8791 }
8792
8793 for (i = 0; nvram_tbl[i].size; i++) {
8794
8795 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8796 nvram_tbl[i].size);
8797 if (rc) {
8798 DP(NETIF_MSG_PROBE,
8799 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8800 goto test_nvram_exit;
8801 }
8802
8803 csum = ether_crc_le(nvram_tbl[i].size, data);
8804 if (csum != CRC32_RESIDUAL) {
8805 DP(NETIF_MSG_PROBE,
8806 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8807 rc = -ENODEV;
8808 goto test_nvram_exit;
8809 }
8810 }
8811
8812test_nvram_exit:
8813 return rc;
8814}
8815
8816static int bnx2x_test_intr(struct bnx2x *bp)
8817{
8818 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8819 int i, rc;
8820
8821 if (!netif_running(bp->dev))
8822 return -ENODEV;
8823
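	/* Exercise the slowpath interrupt: post a harmless (empty) MAC
	 * configuration ramrod and wait up to 10 x 10ms for its completion
	 * event, delivered by interrupt, to clear set_mac_pending.
	 */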
8824 config->hdr.length_6b = 0;
8825 config->hdr.offset = 0;
8826 config->hdr.client_id = BP_CL_ID(bp);
8827 config->hdr.reserved1 = 0;
8828
8829 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8830 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8831 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8832 if (rc == 0) {
8833 bp->set_mac_pending++;
8834 for (i = 0; i < 10; i++) {
8835 if (!bp->set_mac_pending)
8836 break;
8837 msleep_interruptible(10);
8838 }
8839 if (i == 10)
8840 rc = -ENODEV;
8841 }
8842
8843 return rc;
8844}
8845
8846static void bnx2x_self_test(struct net_device *dev,
8847 struct ethtool_test *etest, u64 *buf)
8848{
8849 struct bnx2x *bp = netdev_priv(dev);
8850
8851 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8852
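	/* buf[] layout, as filled below: 0 - registers, 1 - memory,
	 * 2 - loopback, 3 - NVRAM, 4 - interrupt, 5 - link (PMF only),
	 * 7 - MC assert codes; the offline tests (0-2) require reloading
	 * the NIC in diagnostic mode.
	 */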
8853 	if (!netif_running(dev))
8854 		return;
8855
8856 	/* offline tests are not supported in MF mode */
8857 if (IS_E1HMF(bp))
8858 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8859
8860 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8861 u8 link_up;
8862
8863 link_up = bp->link_vars.link_up;
8864 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8865 bnx2x_nic_load(bp, LOAD_DIAG);
8866 /* wait until link state is restored */
8867 bnx2x_wait_for_link(bp, link_up);
8868
8869 if (bnx2x_test_registers(bp) != 0) {
8870 buf[0] = 1;
8871 etest->flags |= ETH_TEST_FL_FAILED;
8872 }
8873 if (bnx2x_test_memory(bp) != 0) {
8874 buf[1] = 1;
8875 etest->flags |= ETH_TEST_FL_FAILED;
8876 }
8877 buf[2] = bnx2x_test_loopback(bp, link_up);
8878 if (buf[2] != 0)
8879 etest->flags |= ETH_TEST_FL_FAILED;
8880
8881 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8882 bnx2x_nic_load(bp, LOAD_NORMAL);
8883 /* wait until link state is restored */
8884 bnx2x_wait_for_link(bp, link_up);
8885 }
8886 if (bnx2x_test_nvram(bp) != 0) {
8887 buf[3] = 1;
8888 etest->flags |= ETH_TEST_FL_FAILED;
8889 }
8890 if (bnx2x_test_intr(bp) != 0) {
8891 buf[4] = 1;
8892 etest->flags |= ETH_TEST_FL_FAILED;
8893 }
8894 if (bp->port.pmf)
8895 if (bnx2x_link_test(bp) != 0) {
8896 buf[5] = 1;
8897 etest->flags |= ETH_TEST_FL_FAILED;
8898 }
8899 buf[7] = bnx2x_mc_assert(bp);
8900 if (buf[7] != 0)
8901 etest->flags |= ETH_TEST_FL_FAILED;
8902
8903#ifdef BNX2X_EXTRA_DEBUG
8904 bnx2x_panic_dump(bp);
8905#endif
8906}
8907
8908 static const struct {
8909 	long offset;
8910 	int size;
8911 	u32 flags;
8912 #define STATS_FLAGS_PORT		1
8913 #define STATS_FLAGS_FUNC		2
8914 	u8 string[ETH_GSTRING_LEN];
8915 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8916 /* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
8917 				8, STATS_FLAGS_FUNC, "rx_bytes" },
8918 	{ STATS_OFFSET32(error_bytes_received_hi),
8919 				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8920 	{ STATS_OFFSET32(total_bytes_transmitted_hi),
8921 				8, STATS_FLAGS_FUNC, "tx_bytes" },
8922 	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8923 				8, STATS_FLAGS_PORT, "tx_error_bytes" },
8924 	{ STATS_OFFSET32(total_unicast_packets_received_hi),
8925 				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8926 	{ STATS_OFFSET32(total_multicast_packets_received_hi),
8927 				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8928 	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
8929 				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8930 	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8931 				8, STATS_FLAGS_FUNC, "tx_packets" },
8932 	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8933 				8, STATS_FLAGS_PORT, "tx_mac_errors" },
8934 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8935 				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8936 	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8937 				8, STATS_FLAGS_PORT, "rx_crc_errors" },
8938 	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8939 				8, STATS_FLAGS_PORT, "rx_align_errors" },
8940 	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8941 				8, STATS_FLAGS_PORT, "tx_single_collisions" },
8942 	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8943 				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8944 	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8945 				8, STATS_FLAGS_PORT, "tx_deferred" },
8946 	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8947 				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8948 	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8949 				8, STATS_FLAGS_PORT, "tx_late_collisions" },
8950 	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8951 				8, STATS_FLAGS_PORT, "tx_total_collisions" },
8952 	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8953 				8, STATS_FLAGS_PORT, "rx_fragments" },
8954 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8955 				8, STATS_FLAGS_PORT, "rx_jabbers" },
8956 	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8957 				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8958 	{ STATS_OFFSET32(jabber_packets_received),
8959 				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8960 	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8961 				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8962 	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8963 			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8964 	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8965 			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8966 	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8967 			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8968 	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8969 			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8970 	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8971 			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8972 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
8973 			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8974 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8975 				8, STATS_FLAGS_PORT, "rx_xon_frames" },
8976 	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8977 				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8978 	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
8979 				8, STATS_FLAGS_PORT, "tx_xon_frames" },
8980 	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
8981 				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8982 	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8983 				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8984 	{ STATS_OFFSET32(mac_filter_discard),
8985 				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8986 	{ STATS_OFFSET32(no_buff_discard),
8987 				4, STATS_FLAGS_FUNC, "rx_discards" },
8988 	{ STATS_OFFSET32(xxoverflow_discard),
8989 				4, STATS_FLAGS_PORT, "rx_fw_discards" },
8990 	{ STATS_OFFSET32(brb_drop_hi),
8991 				8, STATS_FLAGS_PORT, "brb_discard" },
8992 	{ STATS_OFFSET32(brb_truncate_hi),
8993 				8, STATS_FLAGS_PORT, "brb_truncate" },
8994 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8995 				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8996 	{ STATS_OFFSET32(rx_skb_alloc_failed),
8997 				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8998 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8999 				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9000 };
9001
9002#define IS_NOT_E1HMF_STAT(bp, i) \
9003 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9004
9005static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9006{
9007 	struct bnx2x *bp = netdev_priv(dev);
9008 	int i, j;
9009
9010 	switch (stringset) {
9011 	case ETH_SS_STATS:
9012 		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9013 			if (IS_NOT_E1HMF_STAT(bp, i))
9014 				continue;
9015 			strcpy(buf + j*ETH_GSTRING_LEN,
9016 			       bnx2x_stats_arr[i].string);
9017 			j++;
9018 		}
9019 break;
9020
9021 case ETH_SS_TEST:
9022 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9023 break;
9024 }
9025}
9026
9027static int bnx2x_get_stats_count(struct net_device *dev)
9028{
9029 	struct bnx2x *bp = netdev_priv(dev);
9030 	int i, num_stats = 0;
9031
9032 	for (i = 0; i < BNX2X_NUM_STATS; i++) {
9033 		if (IS_NOT_E1HMF_STAT(bp, i))
9034 			continue;
9035 		num_stats++;
9036 	}
9037 	return num_stats;
9038}
9039
9040static void bnx2x_get_ethtool_stats(struct net_device *dev,
9041 struct ethtool_stats *stats, u64 *buf)
9042{
9043 struct bnx2x *bp = netdev_priv(dev);
9044 	u32 *hw_stats = (u32 *)&bp->eth_stats;
9045 	int i, j;
9046
9047 	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9048 		if (IS_NOT_E1HMF_STAT(bp, i))
9049 			continue;
9050
9051 		if (bnx2x_stats_arr[i].size == 0) {
9052 			/* skip this counter */
9053 			buf[j] = 0;
9054 			j++;
9055 			continue;
9056 		}
9057 		if (bnx2x_stats_arr[i].size == 4) {
9058 			/* 4-byte counter */
9059 			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9060 			j++;
9061 			continue;
9062 		}
9063 		/* 8-byte counter */
9064 		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9065 				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9066 		j++;
9067 }
9068}
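/* 8-byte counters live in eth_stats as two consecutive 32-bit words
 * (high word first); HILO_U64() above concatenates them into one u64.
 */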
9069
9070static int bnx2x_phys_id(struct net_device *dev, u32 data)
9071{
9072 struct bnx2x *bp = netdev_priv(dev);
9073 	int port = BP_PORT(bp);
9074 	int i;
9075
9076 	if (!netif_running(dev))
9077 		return 0;
9078
9079 	if (!bp->port.pmf)
9080 		return 0;
9081
9082 	if (data == 0)
9083 		data = 2;
9084
9085 	for (i = 0; i < (data * 2); i++) {
9086 		if ((i % 2) == 0)
9087 			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9088 				      bp->link_params.hw_led_mode,
9089 				      bp->link_params.chip_id);
9090 		else
9091 			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9092 				      bp->link_params.hw_led_mode,
9093 				      bp->link_params.chip_id);
9094
9095 		msleep_interruptible(500);
9096 		if (signal_pending(current))
9097 			break;
9098 	}
9099
9100 	if (bp->link_vars.link_up)
9101 		bnx2x_set_led(bp, port, LED_MODE_OPER,
9102 			      bp->link_vars.line_speed,
9103 			      bp->link_params.hw_led_mode,
9104 			      bp->link_params.chip_id);
9105
9106 return 0;
9107}
9108
9109static struct ethtool_ops bnx2x_ethtool_ops = {
9110 	.get_settings		= bnx2x_get_settings,
9111 	.set_settings		= bnx2x_set_settings,
9112 	.get_drvinfo		= bnx2x_get_drvinfo,
9113 	.get_wol		= bnx2x_get_wol,
9114 	.set_wol		= bnx2x_set_wol,
9115 	.get_msglevel		= bnx2x_get_msglevel,
9116 	.set_msglevel		= bnx2x_set_msglevel,
9117 	.nway_reset		= bnx2x_nway_reset,
9118 	.get_link		= ethtool_op_get_link,
9119 	.get_eeprom_len		= bnx2x_get_eeprom_len,
9120 	.get_eeprom		= bnx2x_get_eeprom,
9121 	.set_eeprom		= bnx2x_set_eeprom,
9122 	.get_coalesce		= bnx2x_get_coalesce,
9123 	.set_coalesce		= bnx2x_set_coalesce,
9124 	.get_ringparam		= bnx2x_get_ringparam,
9125 	.set_ringparam		= bnx2x_set_ringparam,
9126 	.get_pauseparam		= bnx2x_get_pauseparam,
9127 	.set_pauseparam		= bnx2x_set_pauseparam,
9128 	.get_rx_csum		= bnx2x_get_rx_csum,
9129 	.set_rx_csum		= bnx2x_set_rx_csum,
9130 	.get_tx_csum		= ethtool_op_get_tx_csum,
9131 	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
9132 	.set_flags		= bnx2x_set_flags,
9133 	.get_flags		= ethtool_op_get_flags,
9134 	.get_sg			= ethtool_op_get_sg,
9135 	.set_sg			= ethtool_op_set_sg,
9136 	.get_tso		= ethtool_op_get_tso,
9137 	.set_tso		= bnx2x_set_tso,
9138 	.self_test_count	= bnx2x_self_test_count,
9139 	.self_test		= bnx2x_self_test,
9140 	.get_strings		= bnx2x_get_strings,
9141 	.phys_id		= bnx2x_phys_id,
9142 	.get_stats_count	= bnx2x_get_stats_count,
9143 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
9144};
9145
9146/* end of ethtool_ops */
9147
9148/****************************************************************************
9149* General service functions
9150****************************************************************************/
9151
9152static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9153{
9154 u16 pmcsr;
9155
9156 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9157
9158 switch (state) {
9159 case PCI_D0:
9160 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9161 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9162 				       PCI_PM_CTRL_PME_STATUS));
9163
9164 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9165 			/* delay required during transition out of D3hot */
9166 			msleep(20);
9167 		break;
9168
9169 	case PCI_D3hot:
9170 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9171 		pmcsr |= 3;
9172
9173 		if (bp->wol)
9174 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9175
9176 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9177 				      pmcsr);
9178
9179 		/* No more memory access after this point until
9180 		 * device is brought back to D0.
9181 		 */
9182 		break;
9183
9184 	default:
9185 		return -EINVAL;
9186 	}
9187 	return 0;
9188}
9189
9190/*
9191 * net_device service functions
9192 */
9193
9194static int bnx2x_poll(struct napi_struct *napi, int budget)
9195{
9196 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9197 napi);
9198 struct bnx2x *bp = fp->bp;
9199 int work_done = 0;
9200
9201#ifdef BNX2X_STOP_ON_ERROR
9202 if (unlikely(bp->panic))
9203 		goto poll_panic;
9204 #endif
9205
9206 	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9207 	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9208 	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9209
9210 	bnx2x_update_fpsb_idx(fp);
9211
9212 	if (BNX2X_HAS_TX_WORK(fp))
9213 		bnx2x_tx_int(fp, budget);
9214
9215 	if (BNX2X_HAS_RX_WORK(fp))
9216 		work_done = bnx2x_rx_int(fp, budget);
9217
9218 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
9219
9220 	/* must not complete if we consumed full budget */
9221 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9222
9223 #ifdef BNX2X_STOP_ON_ERROR
9224 poll_panic:
9225 #endif
9226 		netif_rx_complete(bp->dev, napi);
9227
9228 		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9229 			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9230 		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9231 			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9232 	}
9233 return work_done;
9234}
9235
9236
9237 /* we split the first BD into headers and data BDs
9238  * to ease the pain of our fellow microcode engineers
9239  * we use one mapping for both BDs
9240  * So far this has only been observed to happen
9241  * in Other Operating Systems(TM)
9242  */
9243static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9244 struct bnx2x_fastpath *fp,
9245 struct eth_tx_bd **tx_bd, u16 hlen,
9246 u16 bd_prod, int nbd)
9247{
9248 struct eth_tx_bd *h_tx_bd = *tx_bd;
9249 struct eth_tx_bd *d_tx_bd;
9250 dma_addr_t mapping;
9251 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9252
9253 /* first fix first BD */
9254 h_tx_bd->nbd = cpu_to_le16(nbd);
9255 h_tx_bd->nbytes = cpu_to_le16(hlen);
9256
9257 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9258 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9259 h_tx_bd->addr_lo, h_tx_bd->nbd);
9260
9261 /* now get a new data BD
9262 * (after the pbd) and fill it */
9263 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9264 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9265
9266 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9267 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9268
9269 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9270 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9271 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9272 d_tx_bd->vlan = 0;
9273 /* this marks the BD as one that has no individual mapping
9274 * the FW ignores this flag in a BD not marked start
9275 */
9276 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9277 DP(NETIF_MSG_TX_QUEUED,
9278 "TSO split data size is %d (%x:%x)\n",
9279 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9280
9281 /* update tx_bd for marking the last BD flag */
9282 *tx_bd = d_tx_bd;
9283
9284 return bd_prod;
9285}
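/* e.g. with hlen = 66 and an original first BD of 1514 bytes, the
 * header BD keeps the mapping with nbytes = 66 while the new data BD
 * points 66 bytes into the same mapping with nbytes = 1448.
 */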
9286
9287static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9288{
9289 if (fix > 0)
9290 csum = (u16) ~csum_fold(csum_sub(csum,
9291 csum_partial(t_header - fix, fix, 0)));
9292
9293 else if (fix < 0)
9294 csum = (u16) ~csum_fold(csum_add(csum,
9295 csum_partial(t_header, -fix, 0)));
9296
9297 return swab16(csum);
9298}
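/* The stack's partial checksum may start 'fix' bytes before (fix > 0)
 * or after (fix < 0) the transport header the chip expects, so the
 * checksum of the overlapping bytes is folded out of (or back into)
 * the sum before byte-swapping it for the HW.
 */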
9299
9300static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9301{
9302 u32 rc;
9303
9304 if (skb->ip_summed != CHECKSUM_PARTIAL)
9305 rc = XMIT_PLAIN;
9306
9307 else {
9308 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9309 rc = XMIT_CSUM_V6;
9310 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9311 rc |= XMIT_CSUM_TCP;
9312
9313 } else {
9314 rc = XMIT_CSUM_V4;
9315 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9316 rc |= XMIT_CSUM_TCP;
9317 }
9318 }
9319
9320 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9321 rc |= XMIT_GSO_V4;
9322
9323 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9324 rc |= XMIT_GSO_V6;
9325
9326 return rc;
9327}
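/* e.g. a TSO IPv4/TCP skb yields XMIT_CSUM_V4 | XMIT_CSUM_TCP |
 * XMIT_GSO_V4, while a packet with no checksum offload requested is
 * simply XMIT_PLAIN.
 */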
9328
9329/* check if packet requires linearization (packet is too fragmented) */
9330static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9331 u32 xmit_type)
9332{
9333 int to_copy = 0;
9334 int hlen = 0;
9335 int first_bd_sz = 0;
9336
9337 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9338 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9339
9340 if (xmit_type & XMIT_GSO) {
9341 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9342 /* Check if LSO packet needs to be copied:
9343 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9344 int wnd_size = MAX_FETCH_BD - 3;
9345 			/* Number of windows to check */
9346 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9347 int wnd_idx = 0;
9348 int frag_idx = 0;
9349 u32 wnd_sum = 0;
9350
9351 /* Headers length */
9352 hlen = (int)(skb_transport_header(skb) - skb->data) +
9353 tcp_hdrlen(skb);
9354
9355 /* Amount of data (w/o headers) on linear part of SKB*/
9356 first_bd_sz = skb_headlen(skb) - hlen;
9357
9358 wnd_sum = first_bd_sz;
9359
9360 /* Calculate the first sum - it's special */
9361 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9362 wnd_sum +=
9363 skb_shinfo(skb)->frags[frag_idx].size;
9364
9365 /* If there was data on linear skb data - check it */
9366 if (first_bd_sz > 0) {
9367 if (unlikely(wnd_sum < lso_mss)) {
9368 to_copy = 1;
9369 goto exit_lbl;
9370 }
9371
9372 wnd_sum -= first_bd_sz;
9373 }
9374
9375 /* Others are easier: run through the frag list and
9376 check all windows */
9377 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9378 wnd_sum +=
9379 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9380
9381 if (unlikely(wnd_sum < lso_mss)) {
9382 to_copy = 1;
9383 break;
9384 }
9385 wnd_sum -=
9386 skb_shinfo(skb)->frags[wnd_idx].size;
9387 }
9388
9389 } else {
9390 			/* a non-LSO packet that is too fragmented
9391 			   must always be linearized */
9392 to_copy = 1;
9393 }
9394 }
9395
9396exit_lbl:
9397 if (unlikely(to_copy))
9398 DP(NETIF_MSG_TX_QUEUED,
9399 "Linearization IS REQUIRED for %s packet. "
9400 "num_frags %d hlen %d first_bd_sz %d\n",
9401 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9402 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9403
9404 return to_copy;
9405}
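/* In short: the FW fetches at most MAX_FETCH_BD descriptors per pass,
 * so for LSO every window of (MAX_FETCH_BD - 3) consecutive BDs must
 * carry at least one full MSS of payload; the loops above slide that
 * window across the frag list and request linearization on any
 * shortfall.
 */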
9406
9407/* called with netif_tx_lock
9408  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9409  * netif_wake_queue()
9410 */
9411static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9412{
9413 struct bnx2x *bp = netdev_priv(dev);
9414 struct bnx2x_fastpath *fp;
9415 struct sw_tx_bd *tx_buf;
9416 struct eth_tx_bd *tx_bd;
9417 struct eth_tx_parse_bd *pbd = NULL;
9418 u16 pkt_prod, bd_prod;
9419 	int nbd, fp_index;
9420 	dma_addr_t mapping;
9421 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
9422 	int vlan_off = (bp->e1hov ? 4 : 0);
9423 	int i;
9424 	u8 hlen = 0;
9425
9426#ifdef BNX2X_STOP_ON_ERROR
9427 if (unlikely(bp->panic))
9428 return NETDEV_TX_BUSY;
9429#endif
9430
9431 	fp_index = (smp_processor_id() % bp->num_queues);
9432 	fp = &bp->fp[fp_index];
9433
9434 	if (unlikely(bnx2x_tx_avail(fp) <
9435 		     (skb_shinfo(skb)->nr_frags + 3))) {
9436 		bp->eth_stats.driver_xoff++;
9437 netif_stop_queue(dev);
9438 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9439 return NETDEV_TX_BUSY;
9440 }
9441
9442 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9443 " gso type %x xmit_type %x\n",
9444 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9445 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9446
9447 	/* First, check if we need to linearize the skb
9448 (due to FW restrictions) */
9449 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9450 /* Statistics of linearization */
9451 bp->lin_cnt++;
9452 if (skb_linearize(skb) != 0) {
9453 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9454 "silently dropping this SKB\n");
9455 dev_kfree_skb_any(skb);
9456 			return NETDEV_TX_OK;
9457 }
9458 }
9459
9460 	/*
9461 	Please read carefully. First we use one BD which we mark as start,
9462 	then for TSO or xsum we have a parsing info BD,
9463 	and only then we have the rest of the TSO BDs.
9464 	(don't forget to mark the last one as last,
9465 	and to unmap only AFTER you write to the BD ...)
9466 	And above all, all pbd sizes are in words - NOT DWORDS!
9467 */
9468
9469 pkt_prod = fp->tx_pkt_prod++;
9470 	bd_prod = TX_BD(fp->tx_bd_prod);
9471
9472 	/* get a tx_buf and first BD */
9473 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9474 tx_bd = &fp->tx_desc_ring[bd_prod];
9475
9476 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9477 tx_bd->general_data = (UNICAST_ADDRESS <<
9478 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9479 tx_bd->general_data |= 1; /* header nbd */
9480
9481 /* remember the first BD of the packet */
9482 tx_buf->first_bd = fp->tx_bd_prod;
9483 tx_buf->skb = skb;
9484
9485 DP(NETIF_MSG_TX_QUEUED,
9486 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9487 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9488
755735eb
EG
9489 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9490 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9491 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9492 vlan_off += 4;
9493 } else
9494 tx_bd->vlan = cpu_to_le16(pkt_prod);
9495
9496 	if (xmit_type) {
9497
9498 		/* turn on parsing and get a BD */
9499 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9500 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9501
9502 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9503 }
9504
9505 if (xmit_type & XMIT_CSUM) {
9506 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9507
9508 /* for now NS flag is not used in Linux */
9509 		pbd->global_data = (hlen |
9510 				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9511 				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9512
9513 pbd->ip_hlen = (skb_transport_header(skb) -
9514 skb_network_header(skb)) / 2;
9515
9516 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9517
9518 pbd->total_hlen = cpu_to_le16(hlen);
9519 hlen = hlen*2 - vlan_off;
9520
9521 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9522
9523 if (xmit_type & XMIT_CSUM_V4)
9524 			tx_bd->bd_flags.as_bitfield |=
9525 ETH_TX_BD_FLAGS_IP_CSUM;
9526 else
9527 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9528
9529 if (xmit_type & XMIT_CSUM_TCP) {
9530 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9531
9532 } else {
9533 s8 fix = SKB_CS_OFF(skb); /* signed! */
9534
9535 			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9536 			pbd->cs_offset = fix / 2;
9537
9538 DP(NETIF_MSG_TX_QUEUED,
9539 "hlen %d offset %d fix %d csum before fix %x\n",
9540 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9541 SKB_CS(skb));
9542
9543 /* HW bug: fixup the CSUM */
9544 pbd->tcp_pseudo_csum =
9545 bnx2x_csum_fix(skb_transport_header(skb),
9546 SKB_CS(skb), fix);
9547
9548 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9549 pbd->tcp_pseudo_csum);
9550 }
9551 }
9552
9553 mapping = pci_map_single(bp->pdev, skb->data,
9554 				 skb_headlen(skb), PCI_DMA_TODEVICE);
9555
9556 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9557 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9558 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9559 tx_bd->nbd = cpu_to_le16(nbd);
9560 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9561
9562 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9563 " nbytes %d flags %x vlan %x\n",
9564 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9565 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9566 le16_to_cpu(tx_bd->vlan));
9567
9568 	if (xmit_type & XMIT_GSO) {
9569
9570 DP(NETIF_MSG_TX_QUEUED,
9571 "TSO packet len %d hlen %d total len %d tso size %d\n",
9572 skb->len, hlen, skb_headlen(skb),
9573 skb_shinfo(skb)->gso_size);
9574
9575 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9576
9577 if (unlikely(skb_headlen(skb) > hlen))
9578 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9579 bd_prod, ++nbd);
9580
9581 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9582 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9583 pbd->tcp_flags = pbd_tcp_flags(skb);
9584
9585 if (xmit_type & XMIT_GSO_V4) {
9586 pbd->ip_id = swab16(ip_hdr(skb)->id);
9587 pbd->tcp_pseudo_csum =
9588 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9589 ip_hdr(skb)->daddr,
9590 0, IPPROTO_TCP, 0));
9591
9592 } else
9593 pbd->tcp_pseudo_csum =
9594 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9595 &ipv6_hdr(skb)->daddr,
9596 0, IPPROTO_TCP, 0));
9597
9598 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9599 }
9600
9601 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9603
9604 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9605 tx_bd = &fp->tx_desc_ring[bd_prod];
9606
9607 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9608 frag->size, PCI_DMA_TODEVICE);
9609
9610 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9611 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9612 tx_bd->nbytes = cpu_to_le16(frag->size);
9613 tx_bd->vlan = cpu_to_le16(pkt_prod);
9614 tx_bd->bd_flags.as_bitfield = 0;
9615
9616 DP(NETIF_MSG_TX_QUEUED,
9617 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9618 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9619 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9620 }
9621
9622 	/* now at last mark the BD as the last BD */
9623 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9624
9625 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9626 tx_bd, tx_bd->bd_flags.as_bitfield);
9627
9628 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9629
9630 	/* now send a tx doorbell, counting the next BD
9631 * if the packet contains or ends with it
9632 */
9633 if (TX_BD_POFF(bd_prod) < nbd)
9634 nbd++;
9635
9636 if (pbd)
9637 DP(NETIF_MSG_TX_QUEUED,
9638 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9639 " tcp_flags %x xsum %x seq %u hlen %u\n",
9640 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9641 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9642 		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9643
9644 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9645
9646 fp->hw_tx_prods->bds_prod =
9647 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9648 	mb(); /* FW restriction: must not reorder writing nbd and packets */
9649 fp->hw_tx_prods->packets_prod =
9650 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9651 	DOORBELL(bp, FP_IDX(fp), 0);
9652
9653 mmiowb();
9654
9655 	fp->tx_bd_prod += nbd;
9656 dev->trans_start = jiffies;
9657
9658 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9659 netif_stop_queue(dev);
9660 		bp->eth_stats.driver_xoff++;
9661 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9662 netif_wake_queue(dev);
9663 }
9664 fp->tx_pkt++;
9665
9666 return NETDEV_TX_OK;
9667}
9668
9669 /* called with rtnl_lock */
9670static int bnx2x_open(struct net_device *dev)
9671{
9672 struct bnx2x *bp = netdev_priv(dev);
9673
9674 bnx2x_set_power_state(bp, PCI_D0);
9675
9676 	return bnx2x_nic_load(bp, LOAD_OPEN);
9677}
9678
9679 /* called with rtnl_lock */
9680static int bnx2x_close(struct net_device *dev)
9681{
9682 struct bnx2x *bp = netdev_priv(dev);
9683
9684 /* Unload the driver, release IRQs */
9685 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9686 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9687 if (!CHIP_REV_IS_SLOW(bp))
9688 bnx2x_set_power_state(bp, PCI_D3hot);
9689
9690 return 0;
9691}
9692
9693/* called with netif_tx_lock from set_multicast */
9694static void bnx2x_set_rx_mode(struct net_device *dev)
9695{
9696 struct bnx2x *bp = netdev_priv(dev);
9697 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9698 int port = BP_PORT(bp);
9699
9700 if (bp->state != BNX2X_STATE_OPEN) {
9701 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9702 return;
9703 }
9704
9705 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9706
9707 if (dev->flags & IFF_PROMISC)
9708 rx_mode = BNX2X_RX_MODE_PROMISC;
9709
9710 else if ((dev->flags & IFF_ALLMULTI) ||
9711 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9712 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9713
9714 else { /* some multicasts */
9715 if (CHIP_IS_E1(bp)) {
9716 int i, old, offset;
9717 struct dev_mc_list *mclist;
9718 struct mac_configuration_cmd *config =
9719 bnx2x_sp(bp, mcast_config);
9720
9721 for (i = 0, mclist = dev->mc_list;
9722 mclist && (i < dev->mc_count);
9723 i++, mclist = mclist->next) {
9724
9725 config->config_table[i].
9726 cam_entry.msb_mac_addr =
9727 swab16(*(u16 *)&mclist->dmi_addr[0]);
9728 config->config_table[i].
9729 cam_entry.middle_mac_addr =
9730 swab16(*(u16 *)&mclist->dmi_addr[2]);
9731 config->config_table[i].
9732 cam_entry.lsb_mac_addr =
9733 swab16(*(u16 *)&mclist->dmi_addr[4]);
9734 config->config_table[i].cam_entry.flags =
9735 cpu_to_le16(port);
9736 config->config_table[i].
9737 target_table_entry.flags = 0;
9738 config->config_table[i].
9739 target_table_entry.client_id = 0;
9740 config->config_table[i].
9741 target_table_entry.vlan_id = 0;
9742
9743 DP(NETIF_MSG_IFUP,
9744 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9745 config->config_table[i].
9746 cam_entry.msb_mac_addr,
9747 config->config_table[i].
9748 cam_entry.middle_mac_addr,
9749 config->config_table[i].
9750 cam_entry.lsb_mac_addr);
9751 }
9752 old = config->hdr.length_6b;
9753 if (old > i) {
9754 for (; i < old; i++) {
9755 if (CAM_IS_INVALID(config->
9756 config_table[i])) {
9757 i--; /* already invalidated */
9758 break;
9759 }
9760 /* invalidate */
9761 CAM_INVALIDATE(config->
9762 config_table[i]);
9763 }
9764 }
9765
9766 if (CHIP_REV_IS_SLOW(bp))
9767 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9768 else
9769 offset = BNX2X_MAX_MULTICAST*(1 + port);
9770
9771 config->hdr.length_6b = i;
9772 config->hdr.offset = offset;
9773 config->hdr.client_id = BP_CL_ID(bp);
9774 config->hdr.reserved1 = 0;
9775
9776 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9777 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9778 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9779 0);
9780 } else { /* E1H */
9781 /* Accept one or more multicasts */
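			/* E1H uses a 256-bit hash filter: the top 8 bits of
			 * the CRC32c of each multicast MAC select one filter
			 * bit; the upper 3 of those bits pick one of the
			 * MC_HASH_SIZE 32-bit registers and the lower 5 the
			 * bit within it.
			 */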
9782 struct dev_mc_list *mclist;
9783 u32 mc_filter[MC_HASH_SIZE];
9784 u32 crc, bit, regidx;
9785 int i;
9786
9787 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9788
9789 for (i = 0, mclist = dev->mc_list;
9790 mclist && (i < dev->mc_count);
9791 i++, mclist = mclist->next) {
9792
9793 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9794 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9795 mclist->dmi_addr[0], mclist->dmi_addr[1],
9796 mclist->dmi_addr[2], mclist->dmi_addr[3],
9797 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9798
9799 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9800 bit = (crc >> 24) & 0xff;
9801 regidx = bit >> 5;
9802 bit &= 0x1f;
9803 mc_filter[regidx] |= (1 << bit);
9804 }
9805
9806 for (i = 0; i < MC_HASH_SIZE; i++)
9807 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9808 mc_filter[i]);
9809 }
9810 }
9811
9812 bp->rx_mode = rx_mode;
9813 bnx2x_set_storm_rx_mode(bp);
9814}
9815
9816/* called with rtnl_lock */
9817static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9818{
9819 struct sockaddr *addr = p;
9820 struct bnx2x *bp = netdev_priv(dev);
9821
9822 	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9823 return -EINVAL;
9824
9825 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9826 if (netif_running(dev)) {
9827 if (CHIP_IS_E1(bp))
9828 bnx2x_set_mac_addr_e1(bp);
9829 else
9830 bnx2x_set_mac_addr_e1h(bp);
9831 }
9832
9833 return 0;
9834}
9835
9836 /* called with rtnl_lock */
9837static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9838{
9839 struct mii_ioctl_data *data = if_mii(ifr);
9840 struct bnx2x *bp = netdev_priv(dev);
9841 int err;
9842
9843 switch (cmd) {
9844 case SIOCGMIIPHY:
9845 		data->phy_id = bp->port.phy_addr;
9846
9847 		/* fallthrough */
9848
9849 	case SIOCGMIIREG: {
9850 		u16 mii_regval;
9851
9852 		if (!netif_running(dev))
9853 			return -EAGAIN;
9854
9855 		mutex_lock(&bp->port.phy_mutex);
9856 		err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9857 				      DEFAULT_PHY_DEV_ADDR,
9858 				      (data->reg_num & 0x1f), &mii_regval);
9859 		data->val_out = mii_regval;
9860 		mutex_unlock(&bp->port.phy_mutex);
9861 return err;
9862 }
9863
9864 case SIOCSMIIREG:
9865 if (!capable(CAP_NET_ADMIN))
9866 return -EPERM;
9867
9868 if (!netif_running(dev))
9869 return -EAGAIN;
9870
9871 mutex_lock(&bp->port.phy_mutex);
9872 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9873 DEFAULT_PHY_DEV_ADDR,
9874 (data->reg_num & 0x1f), data->val_in);
9875 		mutex_unlock(&bp->port.phy_mutex);
9876 return err;
9877
9878 default:
9879 /* do nothing */
9880 break;
9881 }
9882
9883 return -EOPNOTSUPP;
9884}
9885
9886 /* called with rtnl_lock */
9887static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9888{
9889 struct bnx2x *bp = netdev_priv(dev);
9890 	int rc = 0;
9891
9892 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9893 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9894 return -EINVAL;
9895
9896 /* This does not race with packet allocation
9897 	 * because the actual alloc size is
9898 * only updated as part of load
9899 */
9900 dev->mtu = new_mtu;
9901
9902 if (netif_running(dev)) {
9903 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9904 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9905 	}
9906
9907 return rc;
9908}
9909
9910static void bnx2x_tx_timeout(struct net_device *dev)
9911{
9912 struct bnx2x *bp = netdev_priv(dev);
9913
9914#ifdef BNX2X_STOP_ON_ERROR
9915 if (!bp->panic)
9916 bnx2x_panic();
9917#endif
9918 /* This allows the netif to be shutdown gracefully before resetting */
9919 schedule_work(&bp->reset_task);
9920}
9921
9922#ifdef BCM_VLAN
9923 /* called with rtnl_lock */
9924static void bnx2x_vlan_rx_register(struct net_device *dev,
9925 struct vlan_group *vlgrp)
9926{
9927 struct bnx2x *bp = netdev_priv(dev);
9928
9929 bp->vlgrp = vlgrp;
9930 if (netif_running(dev))
9931 		bnx2x_set_client_config(bp);
9932 }
9933
9934#endif
9935
9936#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9937static void poll_bnx2x(struct net_device *dev)
9938{
9939 struct bnx2x *bp = netdev_priv(dev);
9940
9941 disable_irq(bp->pdev->irq);
9942 bnx2x_interrupt(bp->pdev->irq, dev);
9943 enable_irq(bp->pdev->irq);
9944}
9945#endif
9946
9947static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9948 struct net_device *dev)
9949{
9950 struct bnx2x *bp;
9951 int rc;
9952
9953 SET_NETDEV_DEV(dev, &pdev->dev);
9954 bp = netdev_priv(dev);
9955
9956 bp->dev = dev;
9957 bp->pdev = pdev;
9958 	bp->flags = 0;
9959 	bp->func = PCI_FUNC(pdev->devfn);
9960
9961 rc = pci_enable_device(pdev);
9962 if (rc) {
9963 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9964 goto err_out;
9965 }
9966
9967 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9968 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9969 " aborting\n");
9970 rc = -ENODEV;
9971 goto err_out_disable;
9972 }
9973
9974 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9975 printk(KERN_ERR PFX "Cannot find second PCI device"
9976 " base address, aborting\n");
9977 rc = -ENODEV;
9978 goto err_out_disable;
9979 }
9980
9981 if (atomic_read(&pdev->enable_cnt) == 1) {
9982 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9983 if (rc) {
9984 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9985 " aborting\n");
9986 goto err_out_disable;
9987 }
9988
9989 pci_set_master(pdev);
9990 pci_save_state(pdev);
9991 }
9992
9993 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9994 if (bp->pm_cap == 0) {
9995 printk(KERN_ERR PFX "Cannot find power management"
9996 " capability, aborting\n");
9997 rc = -EIO;
9998 goto err_out_release;
9999 }
10000
10001 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10002 if (bp->pcie_cap == 0) {
10003 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10004 " aborting\n");
10005 rc = -EIO;
10006 goto err_out_release;
10007 }
10008
10009 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10010 bp->flags |= USING_DAC_FLAG;
10011 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10012 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10013 " failed, aborting\n");
10014 rc = -EIO;
10015 goto err_out_release;
10016 }
10017
10018 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10019 printk(KERN_ERR PFX "System does not support DMA,"
10020 " aborting\n");
10021 rc = -EIO;
10022 goto err_out_release;
10023 }
10024
10025 dev->mem_start = pci_resource_start(pdev, 0);
10026 dev->base_addr = dev->mem_start;
10027 dev->mem_end = pci_resource_end(pdev, 0);
10028
10029 dev->irq = pdev->irq;
10030
10031 bp->regview = ioremap_nocache(dev->base_addr,
10032 pci_resource_len(pdev, 0));
10033 if (!bp->regview) {
10034 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10035 rc = -ENOMEM;
10036 goto err_out_release;
10037 }
10038
10039 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10040 min_t(u64, BNX2X_DB_SIZE,
10041 pci_resource_len(pdev, 2)));
10042 if (!bp->doorbells) {
10043 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10044 rc = -ENOMEM;
10045 goto err_out_unmap;
10046 }
10047
10048 bnx2x_set_power_state(bp, PCI_D0);
10049
10050 /* clean indirect addresses */
10051 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10052 PCICFG_VENDOR_ID_OFFSET);
10053 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10054 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10055 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10056 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10057
10058 dev->hard_start_xmit = bnx2x_start_xmit;
10059 dev->watchdog_timeo = TX_TIMEOUT;
10060
10061 dev->ethtool_ops = &bnx2x_ethtool_ops;
10062 dev->open = bnx2x_open;
10063 dev->stop = bnx2x_close;
10064 dev->set_multicast_list = bnx2x_set_rx_mode;
10065 dev->set_mac_address = bnx2x_change_mac_addr;
10066 dev->do_ioctl = bnx2x_ioctl;
10067 dev->change_mtu = bnx2x_change_mtu;
10068 dev->tx_timeout = bnx2x_tx_timeout;
10069#ifdef BCM_VLAN
10070 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10071#endif
10072#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10073 dev->poll_controller = poll_bnx2x;
10074#endif
10075 dev->features |= NETIF_F_SG;
10076 dev->features |= NETIF_F_HW_CSUM;
10077 if (bp->flags & USING_DAC_FLAG)
10078 dev->features |= NETIF_F_HIGHDMA;
10079#ifdef BCM_VLAN
10080 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10081#endif
10082 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10083 	dev->features |= NETIF_F_TSO6;
10084
10085 return 0;
10086
10087err_out_unmap:
10088 if (bp->regview) {
10089 iounmap(bp->regview);
10090 bp->regview = NULL;
10091 }
10092 if (bp->doorbells) {
10093 iounmap(bp->doorbells);
10094 bp->doorbells = NULL;
10095 }
10096
10097err_out_release:
10098 if (atomic_read(&pdev->enable_cnt) == 1)
10099 pci_release_regions(pdev);
10100
10101err_out_disable:
10102 pci_disable_device(pdev);
10103 pci_set_drvdata(pdev, NULL);
10104
10105err_out:
10106 return rc;
10107}
10108
10109static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10110{
10111 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10112
10113 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10114 return val;
10115}
10116
10117/* return value of 1=2.5GHz 2=5GHz */
10118static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10119{
10120 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10121
10122 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10123 return val;
10124}
10125
10126static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10127 const struct pci_device_id *ent)
10128{
10129 static int version_printed;
10130 struct net_device *dev = NULL;
10131 struct bnx2x *bp;
10132 	int rc;
10133 	DECLARE_MAC_BUF(mac);
10134
10135 if (version_printed++ == 0)
10136 printk(KERN_INFO "%s", version);
10137
10138 /* dev zeroed in init_etherdev */
10139 dev = alloc_etherdev(sizeof(*bp));
10140 if (!dev) {
10141 printk(KERN_ERR PFX "Cannot allocate net device\n");
10142 		return -ENOMEM;
10143 	}
10144
10145 netif_carrier_off(dev);
10146
10147 bp = netdev_priv(dev);
10148 bp->msglevel = debug;
10149
10150 	rc = bnx2x_init_dev(pdev, dev);
10151 if (rc < 0) {
10152 free_netdev(dev);
10153 return rc;
10154 }
10155
10156 rc = register_netdev(dev);
10157 if (rc) {
10158 		dev_err(&pdev->dev, "Cannot register net device\n");
10159 		goto init_one_exit;
10160 }
10161
10162 pci_set_drvdata(pdev, dev);
10163
10164 rc = bnx2x_init_bp(bp);
10165 if (rc) {
10166 unregister_netdev(dev);
10167 goto init_one_exit;
10168 }
10169
10170 bp->common.name = board_info[ent->driver_data].name;
10171 	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10172 	       " IRQ %d, ", dev->name, bp->common.name,
10173 	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10174 bnx2x_get_pcie_width(bp),
10175 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10176 dev->base_addr, bp->pdev->irq);
10177 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10178 	return 0;
10179
10180init_one_exit:
10181 if (bp->regview)
10182 iounmap(bp->regview);
10183
10184 if (bp->doorbells)
10185 iounmap(bp->doorbells);
10186
10187 free_netdev(dev);
10188
10189 if (atomic_read(&pdev->enable_cnt) == 1)
10190 pci_release_regions(pdev);
10191
10192 pci_disable_device(pdev);
10193 pci_set_drvdata(pdev, NULL);
10194
10195 return rc;
10196}
10197
10198static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10199{
10200 struct net_device *dev = pci_get_drvdata(pdev);
10201 struct bnx2x *bp;
10202
10203 if (!dev) {
10204 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10205 return;
10206 }
10207 	bp = netdev_priv(dev);
10208
10209 unregister_netdev(dev);
10210
10211 if (bp->regview)
10212 iounmap(bp->regview);
10213
10214 if (bp->doorbells)
10215 iounmap(bp->doorbells);
10216
10217 free_netdev(dev);
10218
10219 if (atomic_read(&pdev->enable_cnt) == 1)
10220 pci_release_regions(pdev);
10221
10222 pci_disable_device(pdev);
10223 pci_set_drvdata(pdev, NULL);
10224}
10225
10226static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10227{
10228 struct net_device *dev = pci_get_drvdata(pdev);
10229 struct bnx2x *bp;
10230
10231 if (!dev) {
10232 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10233 return -ENODEV;
10234 }
10235 bp = netdev_priv(dev);
10236
10237 	rtnl_lock();
10238
10239 	pci_save_state(pdev);
10240
10241 if (!netif_running(dev)) {
10242 rtnl_unlock();
10243 return 0;
10244 }
10245
10246 netif_device_detach(dev);
10247
10248 	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10249
10250 	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10251
10252 rtnl_unlock();
10253
10254 return 0;
10255}
10256
10257static int bnx2x_resume(struct pci_dev *pdev)
10258{
10259 struct net_device *dev = pci_get_drvdata(pdev);
10260 	struct bnx2x *bp;
10261 int rc;
10262
10263 if (!dev) {
10264 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10265 return -ENODEV;
10266 }
10267 	bp = netdev_priv(dev);
10268
10269 rtnl_lock();
10270
10271 	pci_restore_state(pdev);
10272
10273 if (!netif_running(dev)) {
10274 rtnl_unlock();
10275 return 0;
10276 }
10277
10278 bnx2x_set_power_state(bp, PCI_D0);
10279 netif_device_attach(dev);
10280
10281 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
10282
10283 rtnl_unlock();
10284
10285 return rc;
10286}
10287
10288/**
10289 * bnx2x_io_error_detected - called when PCI error is detected
10290 * @pdev: Pointer to PCI device
10291 * @state: The current pci connection state
10292 *
10293 * This function is called after a PCI bus error affecting
10294 * this device has been detected.
10295 */
10296static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10297 pci_channel_state_t state)
10298{
10299 struct net_device *dev = pci_get_drvdata(pdev);
10300 struct bnx2x *bp = netdev_priv(dev);
10301
10302 rtnl_lock();
10303
10304 netif_device_detach(dev);
10305
10306 if (netif_running(dev))
10307 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10308
10309 pci_disable_device(pdev);
10310
10311 rtnl_unlock();
10312
10313 /* Request a slot reset */
10314 return PCI_ERS_RESULT_NEED_RESET;
10315}
10316
10317/**
10318 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10319 * @pdev: Pointer to PCI device
10320 *
10321 * Restart the card from scratch, as if from a cold-boot.
10322 */
10323static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10324{
10325 struct net_device *dev = pci_get_drvdata(pdev);
10326 struct bnx2x *bp = netdev_priv(dev);
10327
10328 rtnl_lock();
10329
10330 if (pci_enable_device(pdev)) {
10331 dev_err(&pdev->dev,
10332 "Cannot re-enable PCI device after reset\n");
10333 rtnl_unlock();
10334 return PCI_ERS_RESULT_DISCONNECT;
10335 }
10336
10337 pci_set_master(pdev);
10338 pci_restore_state(pdev);
10339
10340 if (netif_running(dev))
10341 bnx2x_set_power_state(bp, PCI_D0);
10342
10343 rtnl_unlock();
10344
10345 return PCI_ERS_RESULT_RECOVERED;
10346}
10347
10348/**
10349 * bnx2x_io_resume - called when traffic can start flowing again
10350 * @pdev: Pointer to PCI device
10351 *
10352 * This callback is called when the error recovery driver tells us that
10353 * its OK to resume normal operation.
10354 */
10355static void bnx2x_io_resume(struct pci_dev *pdev)
10356{
10357 struct net_device *dev = pci_get_drvdata(pdev);
10358 struct bnx2x *bp = netdev_priv(dev);
10359
10360 rtnl_lock();
10361
10362 if (netif_running(dev))
10363 bnx2x_nic_load(bp, LOAD_OPEN);
10364
10365 netif_device_attach(dev);
10366
10367 rtnl_unlock();
10368}
10369
10370static struct pci_error_handlers bnx2x_err_handler = {
10371 .error_detected = bnx2x_io_error_detected,
10372 .slot_reset = bnx2x_io_slot_reset,
10373 .resume = bnx2x_io_resume,
10374};
10375
10376 static struct pci_driver bnx2x_pci_driver = {
10377 .name = DRV_MODULE_NAME,
10378 .id_table = bnx2x_pci_tbl,
10379 .probe = bnx2x_init_one,
10380 .remove = __devexit_p(bnx2x_remove_one),
10381 .suspend = bnx2x_suspend,
10382 .resume = bnx2x_resume,
10383 .err_handler = &bnx2x_err_handler,
10384};
10385
10386static int __init bnx2x_init(void)
10387{
10388 return pci_register_driver(&bnx2x_pci_driver);
10389}
10390
10391static void __exit bnx2x_cleanup(void)
10392{
10393 pci_unregister_driver(&bnx2x_pci_driver);
10394}
10395
10396module_init(bnx2x_init);
10397module_exit(bnx2x_cleanup);
10398