/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

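/* Companion indirect read: point the GRC window in PCI config space at
 * @addr, read the value back through PCICFG_GRC_DATA and park the window
 * on the vendor-ID offset again so normal config cycles are unaffected.
 * Like the write above, this is an init-time-only path.
 */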
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

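/* Mirror image of bnx2x_write_dmae(): DMA len32 dwords of GRC space
 * starting at src_addr into the slowpath wb_data buffer, falling back to
 * indirect register reads while DMAE is not yet ready.  Serialized by
 * dmae_mutex and polled for completion the same way as the write path.
 */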
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

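/* Walk the assert lists of the four STORM processors (X/T/C/U), print
 * every valid entry and return how many asserts were found; 0 means the
 * firmware recorded no asserts.
 */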
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

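/* Print whatever text the MCP firmware left in its scratchpad buffer:
 * the mark read from offset 0xf104 splits the buffer, which is dumped
 * from the mark to the end and then from the start back to the mark.
 */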
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

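/* Quiesce interrupt processing: bump intr_sem so the handlers bail out,
 * optionally mask the HC, wait for every (MSI-X or INTA) ISR to finish,
 * and make sure the slowpath task is no longer queued or running.
 */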
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

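/* Refresh the cached fastpath status-block indices and report what
 * changed as a bitmask: bit 0 for the c-status-block index, bit 1 for
 * the u-status-block index; 0 means the chip posted no new events.
 */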
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

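/* Number of Tx BDs the stack may still consume.  The NUM_TX_RINGS
 * "next page" BDs are folded into the used count so the ring never
 * fills completely and the page-chain entries stay reserved.
 */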
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

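/* Ramrod (slowpath command) completions are delivered through the Rx
 * completion ring.  Advance the matching fastpath or global state
 * machine according to (command | state) and give the slowpath queue
 * credit back via bp->spq_left.
 */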

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

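/* After a TPA aggregation completes, clear the mask bit of every SGE the
 * firmware consumed, then advance rx_sge_prod over the longest fully
 * consumed prefix of the mask so those pages can be re-posted.
 */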
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

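/* Attach the SGE pages of a completed aggregation to @skb as page
 * fragments, replenishing each consumed SGE slot as we go; on allocation
 * failure the remainder is abandoned and the caller drops the packet.
 */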
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

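/* Publish the three Rx producers (BD, CQE, SGE) to the TSTORM in one
 * shot.  The wmb() orders the BD/SGE writes before the producer update
 * for weakly-ordered archs, since the FW may fetch descriptors as soon
 * as it sees the new producer values.
 */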
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}


/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

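/* Grab one of the chip's driver/firmware arbitration locks through the
 * per-function MISC driver-control registers, polling every 5ms for up
 * to 5 seconds.  Returns 0 on success, -EEXIST if the resource bit is
 * already claimed, -EAGAIN on timeout.
 */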
4a37fb66 1699static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1700{
1701 u32 lock_status;
1702 u32 resource_bit = (1 << resource);
4a37fb66
YG
1703 int func = BP_FUNC(bp);
1704 u32 hw_lock_control_reg;
c18487ee 1705 int cnt;
a2fbb9ea 1706
c18487ee
YR
1707 /* Validating that the resource is within range */
1708 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1709 DP(NETIF_MSG_HW,
1710 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1711 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1712 return -EINVAL;
1713 }
a2fbb9ea 1714
4a37fb66
YG
1715 if (func <= 5) {
1716 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1717 } else {
1718 hw_lock_control_reg =
1719 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1720 }
1721
c18487ee 1722 /* Validating that the resource is not already taken */
4a37fb66 1723 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1724 if (lock_status & resource_bit) {
1725 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1726 lock_status, resource_bit);
1727 return -EEXIST;
1728 }
a2fbb9ea 1729
46230476
EG
1730 /* Try for 5 second every 5ms */
1731 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1732 /* Try to acquire the lock */
4a37fb66
YG
1733 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1734 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1735 if (lock_status & resource_bit)
1736 return 0;
a2fbb9ea 1737
c18487ee 1738 msleep(5);
a2fbb9ea 1739 }
c18487ee
YR
1740 DP(NETIF_MSG_HW, "Timeout\n");
1741 return -EAGAIN;
1742}
a2fbb9ea 1743
4a37fb66 1744static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1745{
1746 u32 lock_status;
1747 u32 resource_bit = (1 << resource);
4a37fb66
YG
1748 int func = BP_FUNC(bp);
1749 u32 hw_lock_control_reg;
a2fbb9ea 1750
c18487ee
YR
1751 /* Validating that the resource is within range */
1752 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1753 DP(NETIF_MSG_HW,
1754 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1755 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1756 return -EINVAL;
1757 }
1758
4a37fb66
YG
1759 if (func <= 5) {
1760 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1761 } else {
1762 hw_lock_control_reg =
1763 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1764 }
1765
c18487ee 1766 /* Validating that the resource is currently taken */
4a37fb66 1767 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1768 if (!(lock_status & resource_bit)) {
1769 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1770 lock_status, resource_bit);
1771 return -EFAULT;
a2fbb9ea
ET
1772 }
1773
4a37fb66 1774 REG_WR(bp, hw_lock_control_reg, resource_bit);
1775 return 0;
1776}
1777
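/* Usage sketch (illustrative, not part of the original source): accesses
 * to resources shared between the two ports/functions are bracketed by
 * these helpers, as the GPIO/SPIO accessors below do:
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
 *		... access the shared MISC_REG_GPIO register ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * The acquire side polls for up to 5 seconds and returns -EAGAIN on
 * timeout, so a caller may retry or fail the operation.
 */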
1778/* HW Lock for shared dual port PHYs */
4a37fb66 1779static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1780{
1781 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1782
34f80b04 1783 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1784
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1787 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1788}
a2fbb9ea 1789
4a37fb66 1790static void bnx2x_release_phy_lock(struct bnx2x *bp)
1791{
1792 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1793
1794 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1795 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1796 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1797
34f80b04 1798 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1799}
a2fbb9ea 1800
17de50b7 1801int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1802{
1803 /* The GPIO should be swapped if swap register is set and active */
1804 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1805 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1806 int gpio_shift = gpio_num +
1807 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1808 u32 gpio_mask = (1 << gpio_shift);
1809 u32 gpio_reg;
a2fbb9ea 1810
1811 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1812 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1813 return -EINVAL;
1814 }
a2fbb9ea 1815
4a37fb66 1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1817 /* read GPIO and mask except the float bits */
1818 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1819
1820 switch (mode) {
1821 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1822 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1823 gpio_num, gpio_shift);
1824 /* clear FLOAT and set CLR */
1825 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1826 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1827 break;
a2fbb9ea 1828
1829 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1830 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1831 gpio_num, gpio_shift);
1832 /* clear FLOAT and set SET */
1833 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1834 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1835 break;
a2fbb9ea 1836
17de50b7 1837 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1838 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1839 gpio_num, gpio_shift);
1840 /* set FLOAT */
1841 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1842 break;
a2fbb9ea 1843
1844 default:
1845 break;
1846 }
1847
c18487ee 1848 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1849 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1850
c18487ee 1851 return 0;
1852}
1853
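/* Worked example (illustrative): with the port-swap strap not active,
 * gpio_port equals the port, so for port 1 and gpio_num 2:
 *
 *	gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT;
 *	gpio_mask  = 1 << gpio_shift;
 *
 * MISC_REGISTERS_GPIO_OUTPUT_HIGH then clears that bit in the FLOAT
 * field and sets it in the SET field of MISC_REG_GPIO.  When both
 * NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE are set, the XOR above
 * flips the port, so the same call would drive the other port's pin.
 */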
c18487ee 1854static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1855{
1856 u32 spio_mask = (1 << spio_num);
1857 u32 spio_reg;
a2fbb9ea 1858
1859 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1860 (spio_num > MISC_REGISTERS_SPIO_7)) {
1861 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1862 return -EINVAL;
1863 }
1864
4a37fb66 1865 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1866 /* read SPIO and mask except the float bits */
1867 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1868
c18487ee 1869 switch (mode) {
6378c025 1870 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1871 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1872 /* clear FLOAT and set CLR */
1873 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1874 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1875 break;
a2fbb9ea 1876
6378c025 1877 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1878 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1879 /* clear FLOAT and set SET */
1880 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1881 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1882 break;
a2fbb9ea 1883
1884 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1885 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1886 /* set FLOAT */
1887 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1888 break;
a2fbb9ea 1889
1890 default:
1891 break;
1892 }
1893
c18487ee 1894 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1895 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1896
1897 return 0;
1898}
1899
c18487ee 1900static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1901{
1902 switch (bp->link_vars.ieee_fc &
1903 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1904 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906 ADVERTISED_Pause);
1907 break;
1908 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1909 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1910 ADVERTISED_Pause);
1911 break;
1912 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1913 bp->port.advertising |= ADVERTISED_Asym_Pause;
1914 break;
1915 default:
34f80b04 1916 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1917 ADVERTISED_Pause);
1918 break;
1919 }
1920}
f1410647 1921
1922static void bnx2x_link_report(struct bnx2x *bp)
1923{
1924 if (bp->link_vars.link_up) {
1925 if (bp->state == BNX2X_STATE_OPEN)
1926 netif_carrier_on(bp->dev);
1927 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1928
c18487ee 1929 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1930
1931 if (bp->link_vars.duplex == DUPLEX_FULL)
1932 printk("full duplex");
1933 else
1934 printk("half duplex");
f1410647 1935
1936 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1937 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1938 printk(", receive ");
c0700f90 1939 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1940 printk("& transmit ");
1941 } else {
1942 printk(", transmit ");
1943 }
1944 printk("flow control ON");
1945 }
1946 printk("\n");
f1410647 1947
1948 } else { /* link_down */
1949 netif_carrier_off(bp->dev);
1950 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1951 }
1952}
1953
1954static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1955{
1956 if (!BP_NOMCP(bp)) {
1957 u8 rc;
a2fbb9ea 1958
19680c48 1959 /* Initialize link parameters structure variables */
1960 /* It is recommended to turn off RX FC for jumbo frames
1961 for better performance */
1962 if (IS_E1HMF(bp))
c0700f90 1963 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1964 else if (bp->dev->mtu > 5000)
c0700f90 1965 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1966 else
c0700f90 1967 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1968
4a37fb66 1969 bnx2x_acquire_phy_lock(bp);
19680c48 1970 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1971 bnx2x_release_phy_lock(bp);
a2fbb9ea 1972
1973 bnx2x_calc_fc_adv(bp);
1974
1975 if (bp->link_vars.link_up)
1976 bnx2x_link_report(bp);
a2fbb9ea 1977
34f80b04 1978
1979 return rc;
1980 }
 1981 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1982 return -EINVAL;
1983}
1984
c18487ee 1985static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1986{
19680c48 1987 if (!BP_NOMCP(bp)) {
4a37fb66 1988 bnx2x_acquire_phy_lock(bp);
19680c48 1989 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1990 bnx2x_release_phy_lock(bp);
a2fbb9ea 1991
1992 bnx2x_calc_fc_adv(bp);
1993 } else
 1994 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 1995}
a2fbb9ea 1996
1997static void bnx2x__link_reset(struct bnx2x *bp)
1998{
19680c48 1999 if (!BP_NOMCP(bp)) {
4a37fb66 2000 bnx2x_acquire_phy_lock(bp);
19680c48 2001 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2002 bnx2x_release_phy_lock(bp);
2003 } else
 2004 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2005}
a2fbb9ea 2006
2007static u8 bnx2x_link_test(struct bnx2x *bp)
2008{
2009 u8 rc;
a2fbb9ea 2010
4a37fb66 2011 bnx2x_acquire_phy_lock(bp);
c18487ee 2012 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2013 bnx2x_release_phy_lock(bp);
a2fbb9ea 2014
2015 return rc;
2016}
a2fbb9ea 2017
2018/* Calculates the sum of vn_min_rates.
2019 It's needed for further normalizing of the min_rates.
2020
2021 Returns:
2022 sum of vn_min_rates
2023 or
2024 0 - if all the min_rates are 0.
33471629 2025 In the latter case the fairness algorithm should be deactivated.
 2026 If not all min_rates are zero then those that are zero will
2027 be set to 1.
2028 */
2029static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2030{
2031 int i, port = BP_PORT(bp);
2032 u32 wsum = 0;
2033 int all_zero = 1;
2034
2035 for (i = 0; i < E1HVN_MAX; i++) {
2036 u32 vn_cfg =
2037 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2038 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2039 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2040 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2041 /* If min rate is zero - set it to 1 */
2042 if (!vn_min_rate)
2043 vn_min_rate = DEF_MIN_RATE;
2044 else
2045 all_zero = 0;
2046
2047 wsum += vn_min_rate;
2048 }
2049 }
2050
2051 /* ... only if all min rates are zeros - disable FAIRNESS */
2052 if (all_zero)
2053 return 0;
2054
2055 return wsum;
2056}
2057
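/* Worked example (illustrative): with two visible functions whose
 * FUNC_MF_CFG_MIN_BW fields are 10 and 30, the loop computes
 * vn_min_rate values of 1000 and 3000 (the field is scaled by 100)
 * and returns wsum = 4000.  If every visible function has a zero min
 * rate, each is counted as DEF_MIN_RATE internally but 0 is returned
 * and the caller deactivates the fairness algorithm.
 */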
2058static void bnx2x_init_port_minmax(struct bnx2x *bp,
2059 int en_fness,
2060 u16 port_rate,
2061 struct cmng_struct_per_port *m_cmng_port)
2062{
2063 u32 r_param = port_rate / 8;
2064 int port = BP_PORT(bp);
2065 int i;
2066
2067 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2068
2069 /* Enable minmax only if we are in e1hmf mode */
2070 if (IS_E1HMF(bp)) {
2071 u32 fair_periodic_timeout_usec;
2072 u32 t_fair;
2073
2074 /* Enable rate shaping and fairness */
2075 m_cmng_port->flags.cmng_vn_enable = 1;
2076 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2077 m_cmng_port->flags.rate_shaping_enable = 1;
2078
2079 if (!en_fness)
 2080 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
 2081 " fairness will be disabled\n");
2082
2083 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2084 m_cmng_port->rs_vars.rs_periodic_timeout =
2085 RS_PERIODIC_TIMEOUT_USEC / 4;
2086
 2087 /* this is the threshold below which no timer arming will occur;
 2088 the 1.25 coefficient makes the threshold a little bigger
 2089 than the real time, to compensate for timer inaccuracy */
2090 m_cmng_port->rs_vars.rs_threshold =
2091 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2092
2093 /* resolution of fairness timer */
2094 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2095 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2096 t_fair = T_FAIR_COEF / port_rate;
2097
2098 /* this is the threshold below which we won't arm
2099 the timer anymore */
2100 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2101
 2102 /* we multiply by 1e3/8 to get bytes/msec.
 2103 We don't want the credits to exceed
 2104 T_FAIR*FAIR_MEM (the algorithm resolution) */
2105 m_cmng_port->fair_vars.upper_bound =
2106 r_param * t_fair * FAIR_MEM;
2107 /* since each tick is 4 usec */
2108 m_cmng_port->fair_vars.fairness_timeout =
2109 fair_periodic_timeout_usec / 4;
2110
2111 } else {
2112 /* Disable rate shaping and fairness */
2113 m_cmng_port->flags.cmng_vn_enable = 0;
2114 m_cmng_port->flags.fairness_enable = 0;
2115 m_cmng_port->flags.rate_shaping_enable = 0;
2116
2117 DP(NETIF_MSG_IFUP,
2118 "Single function mode minmax will be disabled\n");
2119 }
2120
2121 /* Store it to internal memory */
2122 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2123 REG_WR(bp, BAR_XSTRORM_INTMEM +
2124 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2125 ((u32 *)(m_cmng_port))[i]);
2126}
2127
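/* Worked example (illustrative, for a 10G port in e1hmf mode, taking
 * RS_PERIODIC_TIMEOUT_USEC = 100 as the comment above implies):
 *
 *	r_param = 10000 / 8 = 1250
 *	rs_periodic_timeout = 100 / 4 = 25 SDM ticks
 *	t_fair = T_FAIR_COEF / 10000 = 1000 usec
 *
 * so rate shaping re-arms every 100 usec, and the fairness timer
 * resolution scales inversely with the port rate (10000 usec at 1G).
 */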
2128static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2129 u32 wsum, u16 port_rate,
2130 struct cmng_struct_per_port *m_cmng_port)
2131{
2132 struct rate_shaping_vars_per_vn m_rs_vn;
2133 struct fairness_vars_per_vn m_fair_vn;
2134 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2135 u16 vn_min_rate, vn_max_rate;
2136 int i;
2137
2138 /* If function is hidden - set min and max to zeroes */
2139 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2140 vn_min_rate = 0;
2141 vn_max_rate = 0;
2142
2143 } else {
2144 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2145 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2146 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2147 if current min rate is zero - set it to 1.
33471629 2148 This is a requirement of the algorithm. */
2149 if ((vn_min_rate == 0) && wsum)
2150 vn_min_rate = DEF_MIN_RATE;
2151 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2152 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2153 }
2154
2155 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2156 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2157
2158 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2159 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2160
2161 /* global vn counter - maximal Mbps for this vn */
2162 m_rs_vn.vn_counter.rate = vn_max_rate;
2163
2164 /* quota - number of bytes transmitted in this period */
2165 m_rs_vn.vn_counter.quota =
2166 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2167
2168#ifdef BNX2X_PER_PROT_QOS
2169 /* per protocol counter */
2170 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2171 /* maximal Mbps for this protocol */
2172 m_rs_vn.protocol_counters[protocol].rate =
2173 protocol_max_rate[protocol];
2174 /* the quota in each timer period -
2175 number of bytes transmitted in this period */
2176 m_rs_vn.protocol_counters[protocol].quota =
2177 (u32)(rs_periodic_timeout_usec *
2178 ((double)m_rs_vn.
2179 protocol_counters[protocol].rate/8));
2180 }
2181#endif
2182
2183 if (wsum) {
2184 /* credit for each period of the fairness algorithm:
2185 number of bytes in T_FAIR (the vn share the port rate).
2186 wsum should not be larger than 10000, thus
 2187 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2188 m_fair_vn.vn_credit_delta =
2189 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2190 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2191 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2192 m_fair_vn.vn_credit_delta);
2193 }
2194
2195#ifdef BNX2X_PER_PROT_QOS
2196 do {
2197 u32 protocolWeightSum = 0;
2198
2199 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2200 protocolWeightSum +=
2201 drvInit.protocol_min_rate[protocol];
2202 /* per protocol counter -
2203 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2204 if (protocolWeightSum > 0) {
2205 for (protocol = 0;
2206 protocol < NUM_OF_PROTOCOLS; protocol++)
2207 /* credit for each period of the
2208 fairness algorithm - number of bytes in
2209 T_FAIR (the protocol share the vn rate) */
2210 m_fair_vn.protocol_credit_delta[protocol] =
2211 (u32)((vn_min_rate / 8) * t_fair *
2212 protocol_min_rate / protocolWeightSum);
2213 }
2214 } while (0);
2215#endif
2216
2217 /* Store it to internal memory */
2218 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2219 REG_WR(bp, BAR_XSTRORM_INTMEM +
2220 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2221 ((u32 *)(&m_rs_vn))[i]);
2222
2223 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2224 REG_WR(bp, BAR_XSTRORM_INTMEM +
2225 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2226 ((u32 *)(&m_fair_vn))[i]);
2227}
2228
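/* Worked example (illustrative): a vn whose FUNC_MF_CFG_MAX_BW field
 * is 25 gets vn_max_rate = 2500 Mbps; with the 100 usec rate-shaping
 * period used above, its per-period quota is
 *
 *	quota = 2500 * 100 / 8 = 31250 bytes
 *
 * The fairness credit is clamped from below by 2 * fair_threshold so
 * a vn with a tiny min rate still makes forward progress.
 */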
2229/* This function is called upon link interrupt */
2230static void bnx2x_link_attn(struct bnx2x *bp)
2231{
2232 int vn;
2233
2234 /* Make sure that we are synced with the current statistics */
2235 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2236
c18487ee 2237 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2238
2239 if (bp->link_vars.link_up) {
2240
2241 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2242 struct host_port_stats *pstats;
2243
2244 pstats = bnx2x_sp(bp, port_stats);
2245 /* reset old bmac stats */
2246 memset(&(pstats->mac_stx[0]), 0,
2247 sizeof(struct mac_stx));
2248 }
2249 if ((bp->state == BNX2X_STATE_OPEN) ||
2250 (bp->state == BNX2X_STATE_DISABLED))
2251 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2252 }
2253
2254 /* indicate link status */
2255 bnx2x_link_report(bp);
2256
2257 if (IS_E1HMF(bp)) {
2258 int func;
2259
2260 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2261 if (vn == BP_E1HVN(bp))
2262 continue;
2263
2264 func = ((vn << 1) | BP_PORT(bp));
2265
2266 /* Set the attention towards other drivers
2267 on the same port */
2268 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2269 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2270 }
2271 }
2272
2273 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2274 struct cmng_struct_per_port m_cmng_port;
2275 u32 wsum;
2276 int port = BP_PORT(bp);
2277
2278 /* Init RATE SHAPING and FAIRNESS contexts */
2279 wsum = bnx2x_calc_vn_wsum(bp);
2280 bnx2x_init_port_minmax(bp, (int)wsum,
2281 bp->link_vars.line_speed,
2282 &m_cmng_port);
2283 if (IS_E1HMF(bp))
2284 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2285 bnx2x_init_vn_minmax(bp, 2*vn + port,
2286 wsum, bp->link_vars.line_speed,
2287 &m_cmng_port);
2288 }
c18487ee 2289}
a2fbb9ea 2290
2291static void bnx2x__link_status_update(struct bnx2x *bp)
2292{
2293 if (bp->state != BNX2X_STATE_OPEN)
2294 return;
a2fbb9ea 2295
c18487ee 2296 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2297
2298 if (bp->link_vars.link_up)
2299 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2300 else
2301 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2302
2303 /* indicate link status */
2304 bnx2x_link_report(bp);
a2fbb9ea 2305}
a2fbb9ea 2306
2307static void bnx2x_pmf_update(struct bnx2x *bp)
2308{
2309 int port = BP_PORT(bp);
2310 u32 val;
2311
2312 bp->port.pmf = 1;
2313 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2314
2315 /* enable nig attention */
2316 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2317 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2318 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2319
2320 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2321}
2322
c18487ee 2323/* end of Link */
2324
2325/* slow path */
2326
2327/*
2328 * General service functions
2329 */
2330
2331/* the slow path queue is odd since completions arrive on the fastpath ring */
2332static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2333 u32 data_hi, u32 data_lo, int common)
2334{
34f80b04 2335 int func = BP_FUNC(bp);
a2fbb9ea 2336
2337 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2338 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2339 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2340 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2341 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2342
2343#ifdef BNX2X_STOP_ON_ERROR
2344 if (unlikely(bp->panic))
2345 return -EIO;
2346#endif
2347
34f80b04 2348 spin_lock_bh(&bp->spq_lock);
2349
2350 if (!bp->spq_left) {
2351 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2352 spin_unlock_bh(&bp->spq_lock);
2353 bnx2x_panic();
2354 return -EBUSY;
2355 }
f1410647 2356
 2357 /* CID needs the port number to be encoded in it */
2358 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2359 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2360 HW_CID(bp, cid)));
2361 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2362 if (common)
2363 bp->spq_prod_bd->hdr.type |=
2364 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2365
2366 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2367 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2368
2369 bp->spq_left--;
2370
2371 if (bp->spq_prod_bd == bp->spq_last_bd) {
2372 bp->spq_prod_bd = bp->spq;
2373 bp->spq_prod_idx = 0;
2374 DP(NETIF_MSG_TIMER, "end of spq\n");
2375
2376 } else {
2377 bp->spq_prod_bd++;
2378 bp->spq_prod_idx++;
2379 }
2380
34f80b04 2381 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2382 bp->spq_prod_idx);
2383
34f80b04 2384 spin_unlock_bh(&bp->spq_lock);
2385 return 0;
2386}
2387
2388/* acquire split MCP access lock register */
4a37fb66 2389static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2390{
a2fbb9ea 2391 u32 i, j, val;
34f80b04 2392 int rc = 0;
2393
2394 might_sleep();
2395 i = 100;
2396 for (j = 0; j < i*10; j++) {
2397 val = (1UL << 31);
2398 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2399 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2400 if (val & (1L << 31))
2401 break;
2402
2403 msleep(5);
2404 }
a2fbb9ea 2405 if (!(val & (1L << 31))) {
19680c48 2406 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2407 rc = -EBUSY;
2408 }
2409
2410 return rc;
2411}
2412
2413/* release split MCP access lock register */
2414static void bnx2x_release_alr(struct bnx2x *bp)
2415{
2416 u32 val = 0;
2417
2418 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2419}
2420
2421static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2422{
2423 struct host_def_status_block *def_sb = bp->def_status_blk;
2424 u16 rc = 0;
2425
2426 barrier(); /* status block is written to by the chip */
2427 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2428 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2429 rc |= 1;
2430 }
2431 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2432 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2433 rc |= 2;
2434 }
2435 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2436 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2437 rc |= 4;
2438 }
2439 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2440 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2441 rc |= 8;
2442 }
2443 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2444 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2445 rc |= 16;
2446 }
2447 return rc;
2448}
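/* Summary (illustrative, not in the original source): the returned
 * bitmask encodes which default status block indices have moved:
 *
 *	bit 0 (0x01) - attention bits index
 *	bit 1 (0x02) - CStorm index
 *	bit 2 (0x04) - UStorm index
 *	bit 3 (0x08) - XStorm index
 *	bit 4 (0x10) - TStorm index
 *
 * bnx2x_sp_task() below tests bit 0 for HW attentions and bit 1 for
 * CStorm events (statistics completion).
 */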
2449
2450/*
2451 * slow path service functions
2452 */
2453
2454static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2455{
34f80b04 2456 int port = BP_PORT(bp);
2457 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2458 COMMAND_REG_ATTN_BITS_SET);
2459 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2460 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2461 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2462 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2463 u32 aeu_mask;
a2fbb9ea 2464
2465 if (bp->attn_state & asserted)
2466 BNX2X_ERR("IGU ERROR\n");
2467
2468 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469 aeu_mask = REG_RD(bp, aeu_addr);
2470
a2fbb9ea 2471 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2472 aeu_mask, asserted);
2473 aeu_mask &= ~(asserted & 0xff);
2474 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2475
2476 REG_WR(bp, aeu_addr, aeu_mask);
2477 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2478
3fcaf2e5 2479 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2480 bp->attn_state |= asserted;
3fcaf2e5 2481 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2482
2483 if (asserted & ATTN_HARD_WIRED_MASK) {
2484 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2485
2486 bnx2x_acquire_phy_lock(bp);
2487
2488 /* save nig interrupt mask */
2489 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2490 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2491
c18487ee 2492 bnx2x_link_attn(bp);
2493
2494 /* handle unicore attn? */
2495 }
2496 if (asserted & ATTN_SW_TIMER_4_FUNC)
2497 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2498
2499 if (asserted & GPIO_2_FUNC)
2500 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2501
2502 if (asserted & GPIO_3_FUNC)
2503 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2504
2505 if (asserted & GPIO_4_FUNC)
2506 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2507
2508 if (port == 0) {
2509 if (asserted & ATTN_GENERAL_ATTN_1) {
2510 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2511 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2512 }
2513 if (asserted & ATTN_GENERAL_ATTN_2) {
2514 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2515 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2516 }
2517 if (asserted & ATTN_GENERAL_ATTN_3) {
2518 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2519 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2520 }
2521 } else {
2522 if (asserted & ATTN_GENERAL_ATTN_4) {
2523 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2524 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2525 }
2526 if (asserted & ATTN_GENERAL_ATTN_5) {
2527 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2529 }
2530 if (asserted & ATTN_GENERAL_ATTN_6) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2533 }
2534 }
2535
2536 } /* if hardwired */
2537
2538 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2539 asserted, hc_addr);
2540 REG_WR(bp, hc_addr, asserted);
2541
2542 /* now set back the mask */
a5e9a7cf 2543 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2544 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2545 bnx2x_release_phy_lock(bp);
2546 }
2547}
2548
877e9aa4 2549static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2550{
34f80b04 2551 int port = BP_PORT(bp);
2552 int reg_offset;
2553 u32 val;
2554
2555 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2556 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2557
34f80b04 2558 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2559
2560 val = REG_RD(bp, reg_offset);
2561 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2562 REG_WR(bp, reg_offset, val);
2563
2564 BNX2X_ERR("SPIO5 hw attention\n");
2565
34f80b04 2566 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2567 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2568 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2569 /* Fan failure attention */
2570
17de50b7 2571 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2572 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2573 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2574 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2575 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2576 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2577 /* mark the failure */
c18487ee 2578 bp->link_params.ext_phy_config &=
877e9aa4 2579 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2580 bp->link_params.ext_phy_config |=
2581 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2582 SHMEM_WR(bp,
2583 dev_info.port_hw_config[port].
2584 external_phy_config,
c18487ee 2585 bp->link_params.ext_phy_config);
2586 /* log the failure */
2587 printk(KERN_ERR PFX "Fan Failure on Network"
2588 " Controller %s has caused the driver to"
2589 " shutdown the card to prevent permanent"
2590 " damage. Please contact Dell Support for"
2591 " assistance\n", bp->dev->name);
2592 break;
2593
2594 default:
2595 break;
2596 }
2597 }
2598
2599 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2600
2601 val = REG_RD(bp, reg_offset);
2602 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2603 REG_WR(bp, reg_offset, val);
2604
2605 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2606 (attn & HW_INTERRUT_ASSERT_SET_0));
2607 bnx2x_panic();
2608 }
2609}
2610
2611static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2612{
2613 u32 val;
2614
2615 if (attn & BNX2X_DOORQ_ASSERT) {
2616
2617 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2618 BNX2X_ERR("DB hw attention 0x%x\n", val);
2619 /* DORQ discard attention */
2620 if (val & 0x2)
2621 BNX2X_ERR("FATAL error from DORQ\n");
2622 }
2623
2624 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2625
2626 int port = BP_PORT(bp);
2627 int reg_offset;
2628
2629 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2630 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2631
2632 val = REG_RD(bp, reg_offset);
2633 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2634 REG_WR(bp, reg_offset, val);
2635
2636 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2637 (attn & HW_INTERRUT_ASSERT_SET_1));
2638 bnx2x_panic();
2639 }
2640}
2641
2642static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2643{
2644 u32 val;
2645
2646 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2647
2648 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2649 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2650 /* CFC error attention */
2651 if (val & 0x2)
2652 BNX2X_ERR("FATAL error from CFC\n");
2653 }
2654
2655 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2656
2657 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2658 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2659 /* RQ_USDMDP_FIFO_OVERFLOW */
2660 if (val & 0x18000)
2661 BNX2X_ERR("FATAL error from PXP\n");
2662 }
2663
2664 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2665
2666 int port = BP_PORT(bp);
2667 int reg_offset;
2668
2669 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2670 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2671
2672 val = REG_RD(bp, reg_offset);
2673 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2674 REG_WR(bp, reg_offset, val);
2675
2676 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2677 (attn & HW_INTERRUT_ASSERT_SET_2));
2678 bnx2x_panic();
2679 }
2680}
2681
2682static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2683{
2684 u32 val;
2685
2686 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2687
2688 if (attn & BNX2X_PMF_LINK_ASSERT) {
2689 int func = BP_FUNC(bp);
2690
2691 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2692 bnx2x__link_status_update(bp);
2693 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2694 DRV_STATUS_PMF)
2695 bnx2x_pmf_update(bp);
2696
2697 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2698
2699 BNX2X_ERR("MC assert!\n");
2700 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2702 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2703 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2704 bnx2x_panic();
2705
2706 } else if (attn & BNX2X_MCP_ASSERT) {
2707
2708 BNX2X_ERR("MCP assert!\n");
2709 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2710 bnx2x_fw_dump(bp);
2711
2712 } else
2713 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2714 }
2715
2716 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2717 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2718 if (attn & BNX2X_GRC_TIMEOUT) {
2719 val = CHIP_IS_E1H(bp) ?
2720 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2721 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2722 }
2723 if (attn & BNX2X_GRC_RSV) {
2724 val = CHIP_IS_E1H(bp) ?
2725 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2726 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2727 }
877e9aa4 2728 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2729 }
2730}
2731
2732static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2733{
2734 struct attn_route attn;
2735 struct attn_route group_mask;
34f80b04 2736 int port = BP_PORT(bp);
877e9aa4 2737 int index;
2738 u32 reg_addr;
2739 u32 val;
3fcaf2e5 2740 u32 aeu_mask;
2741
 2742 /* need to take the HW lock because the MCP or the other port
 2743 might also try to handle this event */
4a37fb66 2744 bnx2x_acquire_alr(bp);
2745
2746 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2747 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2748 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2749 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2750 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2751 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2752
2753 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2754 if (deasserted & (1 << index)) {
2755 group_mask = bp->attn_group[index];
2756
2757 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2758 index, group_mask.sig[0], group_mask.sig[1],
2759 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2760
2761 bnx2x_attn_int_deasserted3(bp,
2762 attn.sig[3] & group_mask.sig[3]);
2763 bnx2x_attn_int_deasserted1(bp,
2764 attn.sig[1] & group_mask.sig[1]);
2765 bnx2x_attn_int_deasserted2(bp,
2766 attn.sig[2] & group_mask.sig[2]);
2767 bnx2x_attn_int_deasserted0(bp,
2768 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2769
2770 if ((attn.sig[0] & group_mask.sig[0] &
2771 HW_PRTY_ASSERT_SET_0) ||
2772 (attn.sig[1] & group_mask.sig[1] &
2773 HW_PRTY_ASSERT_SET_1) ||
2774 (attn.sig[2] & group_mask.sig[2] &
2775 HW_PRTY_ASSERT_SET_2))
6378c025 2776 BNX2X_ERR("FATAL HW block parity attention\n");
2777 }
2778 }
2779
4a37fb66 2780 bnx2x_release_alr(bp);
a2fbb9ea 2781
5c862848 2782 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2783
2784 val = ~deasserted;
2785 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2786 val, reg_addr);
5c862848 2787 REG_WR(bp, reg_addr, val);
a2fbb9ea 2788
a2fbb9ea 2789 if (~bp->attn_state & deasserted)
3fcaf2e5 2790 BNX2X_ERR("IGU ERROR\n");
2791
2792 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2793 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2794
2795 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2796 aeu_mask = REG_RD(bp, reg_addr);
2797
2798 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2799 aeu_mask, deasserted);
2800 aeu_mask |= (deasserted & 0xff);
2801 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2802
2803 REG_WR(bp, reg_addr, aeu_mask);
2804 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2805
2806 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2807 bp->attn_state &= ~deasserted;
2808 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2809}
2810
2811static void bnx2x_attn_int(struct bnx2x *bp)
2812{
2813 /* read local copy of bits */
2814 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2815 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2816 u32 attn_state = bp->attn_state;
2817
2818 /* look for changed bits */
2819 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2820 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2821
2822 DP(NETIF_MSG_HW,
2823 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2824 attn_bits, attn_ack, asserted, deasserted);
2825
2826 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2827 BNX2X_ERR("BAD attention state\n");
2828
2829 /* handle bits that were raised */
2830 if (asserted)
2831 bnx2x_attn_int_asserted(bp, asserted);
2832
2833 if (deasserted)
2834 bnx2x_attn_int_deasserted(bp, deasserted);
2835}
2836
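/* Worked example (illustrative): with attn_bits = 0101b,
 * attn_ack = 0011b and attn_state = 0011b:
 *
 *	asserted   = 0101 & ~0011 & ~0011 = 0100  (bit 2 newly raised)
 *	deasserted = ~0101 & 0011 & 0011  = 0010  (bit 1 newly cleared)
 *
 * A bit on which attn_bits and attn_ack agree while attn_state
 * disagrees trips the "BAD attention state" check above.
 */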
2837static void bnx2x_sp_task(struct work_struct *work)
2838{
1cf167f2 2839 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2840 u16 status;
2841
34f80b04 2842
2843 /* Return here if interrupt is disabled */
2844 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2845 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2846 return;
2847 }
2848
2849 status = bnx2x_update_dsb_idx(bp);
2850/* if (status == 0) */
2851/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2852
3196a88a 2853 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2854
2855 /* HW attentions */
2856 if (status & 0x1)
a2fbb9ea 2857 bnx2x_attn_int(bp);
a2fbb9ea 2858
2859 /* CStorm events: query_stats, port delete ramrod */
2860 if (status & 0x2)
2861 bp->stats_pending = 0;
2862
2863 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2864 IGU_INT_NOP, 1);
2865 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2866 IGU_INT_NOP, 1);
2867 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2868 IGU_INT_NOP, 1);
2869 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2870 IGU_INT_NOP, 1);
2871 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2872 IGU_INT_ENABLE, 1);
877e9aa4 2873
2874}
2875
2876static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2877{
2878 struct net_device *dev = dev_instance;
2879 struct bnx2x *bp = netdev_priv(dev);
2880
2881 /* Return here if interrupt is disabled */
2882 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2883 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2884 return IRQ_HANDLED;
2885 }
2886
877e9aa4 2887 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2888
2889#ifdef BNX2X_STOP_ON_ERROR
2890 if (unlikely(bp->panic))
2891 return IRQ_HANDLED;
2892#endif
2893
1cf167f2 2894 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2895
2896 return IRQ_HANDLED;
2897}
2898
2899/* end of slow path */
2900
2901/* Statistics */
2902
2903/****************************************************************************
2904* Macros
2905****************************************************************************/
2906
2907/* sum[hi:lo] += add[hi:lo] */
2908#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2909 do { \
2910 s_lo += a_lo; \
2911 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2912 } while (0)
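/* Worked example (illustrative): adding 1 to 0x00000000_ffffffff
 * wraps s_lo to 0; the (s_lo < a_lo) test detects the wrap and
 * carries 1 into s_hi, giving 0x00000001_00000000.
 */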
2913
2914/* difference = minuend - subtrahend */
2915#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2916 do { \
2917 if (m_lo < s_lo) { \
2918 /* underflow */ \
a2fbb9ea 2919 d_hi = m_hi - s_hi; \
bb2a0f7a 2920 if (d_hi > 0) { \
6378c025 2921 /* we can 'loan' 1 */ \
2922 d_hi--; \
2923 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2924 } else { \
6378c025 2925 /* m_hi <= s_hi */ \
2926 d_hi = 0; \
2927 d_lo = 0; \
2928 } \
2929 } else { \
2930 /* m_lo >= s_lo */ \
a2fbb9ea 2931 if (m_hi < s_hi) { \
2932 d_hi = 0; \
2933 d_lo = 0; \
2934 } else { \
6378c025 2935 /* m_hi >= s_hi */ \
2936 d_hi = m_hi - s_hi; \
2937 d_lo = m_lo - s_lo; \
2938 } \
2939 } \
2940 } while (0)
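/* Worked example (illustrative): 0x00000001_00000000 minus
 * 0x00000000_00000001 takes the underflow branch (m_lo 0 < s_lo 1):
 * d_hi borrows down to 0 and d_lo = 0 + (UINT_MAX - 1) + 1
 * = 0xffffffff, i.e. 2^32 - 1.
 */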
2941
bb2a0f7a 2942#define UPDATE_STAT64(s, t) \
a2fbb9ea 2943 do { \
2944 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2945 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2946 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2947 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2948 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2949 pstats->mac_stx[1].t##_lo, diff.lo); \
2950 } while (0)
2951
bb2a0f7a 2952#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2953 do { \
2954 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2955 diff.lo, new->s##_lo, old->s##_lo); \
2956 ADD_64(estats->t##_hi, diff.hi, \
2957 estats->t##_lo, diff.lo); \
2958 } while (0)
2959
2960/* sum[hi:lo] += add */
2961#define ADD_EXTEND_64(s_hi, s_lo, a) \
2962 do { \
2963 s_lo += a; \
2964 s_hi += (s_lo < a) ? 1 : 0; \
2965 } while (0)
2966
bb2a0f7a 2967#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2968 do { \
2969 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2970 pstats->mac_stx[1].s##_lo, \
2971 new->s); \
2972 } while (0)
2973
bb2a0f7a 2974#define UPDATE_EXTEND_TSTAT(s, t) \
2975 do { \
2976 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2977 old_tclient->s = le32_to_cpu(tclient->s); \
2978 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2979 } while (0)
2980
2981#define UPDATE_EXTEND_XSTAT(s, t) \
2982 do { \
2983 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2984 old_xclient->s = le32_to_cpu(xclient->s); \
2985 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2986 } while (0)
2987
2988/*
2989 * General service functions
2990 */
2991
2992static inline long bnx2x_hilo(u32 *hiref)
2993{
2994 u32 lo = *(hiref + 1);
2995#if (BITS_PER_LONG == 64)
2996 u32 hi = *hiref;
2997
2998 return HILO_U64(hi, lo);
2999#else
3000 return lo;
3001#endif
3002}
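/* Note (illustrative): the statistics are kept as {hi, lo} u32 pairs,
 * so {hi = 1, lo = 2} reads back as 0x100000002 on 64-bit builds,
 * while 32-bit builds return only the low word since a long cannot
 * hold the full value.
 */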
3003
3004/*
3005 * Init service functions
3006 */
3007
3008static void bnx2x_storm_stats_post(struct bnx2x *bp)
3009{
3010 if (!bp->stats_pending) {
3011 struct eth_query_ramrod_data ramrod_data = {0};
3012 int rc;
3013
3014 ramrod_data.drv_counter = bp->stats_counter++;
3015 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3016 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3017
3018 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3019 ((u32 *)&ramrod_data)[1],
3020 ((u32 *)&ramrod_data)[0], 0);
3021 if (rc == 0) {
 3022 /* the stats ramrod has its own slot on the spq */
3023 bp->spq_left++;
3024 bp->stats_pending = 1;
3025 }
3026 }
3027}
3028
3029static void bnx2x_stats_init(struct bnx2x *bp)
3030{
3031 int port = BP_PORT(bp);
3032
3033 bp->executer_idx = 0;
3034 bp->stats_counter = 0;
3035
3036 /* port stats */
3037 if (!BP_NOMCP(bp))
3038 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3039 else
3040 bp->port.port_stx = 0;
3041 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3042
3043 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3044 bp->port.old_nig_stats.brb_discard =
3045 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3046 bp->port.old_nig_stats.brb_truncate =
3047 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3048 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3049 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3050 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3051 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3052
3053 /* function stats */
3054 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3055 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3056 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3057 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3058
3059 bp->stats_state = STATS_STATE_DISABLED;
3060 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3061 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3062}
3063
3064static void bnx2x_hw_stats_post(struct bnx2x *bp)
3065{
3066 struct dmae_command *dmae = &bp->stats_dmae;
3067 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3068
3069 *stats_comp = DMAE_COMP_VAL;
3070
3071 /* loader */
3072 if (bp->executer_idx) {
3073 int loader_idx = PMF_DMAE_C(bp);
3074
3075 memset(dmae, 0, sizeof(struct dmae_command));
3076
3077 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3078 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3079 DMAE_CMD_DST_RESET |
3080#ifdef __BIG_ENDIAN
3081 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3082#else
3083 DMAE_CMD_ENDIANITY_DW_SWAP |
3084#endif
3085 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3086 DMAE_CMD_PORT_0) |
3087 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3088 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3089 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3090 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3091 sizeof(struct dmae_command) *
3092 (loader_idx + 1)) >> 2;
3093 dmae->dst_addr_hi = 0;
3094 dmae->len = sizeof(struct dmae_command) >> 2;
3095 if (CHIP_IS_E1(bp))
3096 dmae->len--;
3097 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3098 dmae->comp_addr_hi = 0;
3099 dmae->comp_val = 1;
3100
3101 *stats_comp = 0;
3102 bnx2x_post_dmae(bp, dmae, loader_idx);
3103
3104 } else if (bp->func_stx) {
3105 *stats_comp = 0;
3106 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3107 }
3108}
3109
3110static int bnx2x_stats_comp(struct bnx2x *bp)
3111{
3112 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3113 int cnt = 10;
3114
3115 might_sleep();
3116 while (*stats_comp != DMAE_COMP_VAL) {
3117 if (!cnt) {
 3118 BNX2X_ERR("timeout waiting for stats to finish\n");
3119 break;
3120 }
3121 cnt--;
12469401 3122 msleep(1);
3123 }
3124 return 1;
3125}
3126
3127/*
3128 * Statistics service functions
3129 */
3130
3131static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3132{
3133 struct dmae_command *dmae;
3134 u32 opcode;
3135 int loader_idx = PMF_DMAE_C(bp);
3136 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3137
3138 /* sanity */
3139 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3140 BNX2X_ERR("BUG!\n");
3141 return;
3142 }
3143
3144 bp->executer_idx = 0;
3145
3146 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3147 DMAE_CMD_C_ENABLE |
3148 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3149#ifdef __BIG_ENDIAN
3150 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3151#else
3152 DMAE_CMD_ENDIANITY_DW_SWAP |
3153#endif
3154 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3155 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3156
3157 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3158 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3159 dmae->src_addr_lo = bp->port.port_stx >> 2;
3160 dmae->src_addr_hi = 0;
3161 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3162 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3163 dmae->len = DMAE_LEN32_RD_MAX;
3164 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3165 dmae->comp_addr_hi = 0;
3166 dmae->comp_val = 1;
3167
3168 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3169 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3170 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3171 dmae->src_addr_hi = 0;
3172 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3173 DMAE_LEN32_RD_MAX * 4);
3174 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3175 DMAE_LEN32_RD_MAX * 4);
3176 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3177 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3178 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3179 dmae->comp_val = DMAE_COMP_VAL;
3180
3181 *stats_comp = 0;
3182 bnx2x_hw_stats_post(bp);
3183 bnx2x_stats_comp(bp);
3184}
3185
3186static void bnx2x_port_stats_init(struct bnx2x *bp)
3187{
3188 struct dmae_command *dmae;
34f80b04 3189 int port = BP_PORT(bp);
bb2a0f7a 3190 int vn = BP_E1HVN(bp);
a2fbb9ea 3191 u32 opcode;
bb2a0f7a 3192 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3193 u32 mac_addr;
3194 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3195
3196 /* sanity */
3197 if (!bp->link_vars.link_up || !bp->port.pmf) {
3198 BNX2X_ERR("BUG!\n");
3199 return;
3200 }
3201
3202 bp->executer_idx = 0;
3203
3204 /* MCP */
3205 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3206 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3207 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3208#ifdef __BIG_ENDIAN
bb2a0f7a 3209 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3210#else
bb2a0f7a 3211 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3212#endif
3213 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3214 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3215
bb2a0f7a 3216 if (bp->port.port_stx) {
3217
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = opcode;
3220 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3221 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3222 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3223 dmae->dst_addr_hi = 0;
3224 dmae->len = sizeof(struct host_port_stats) >> 2;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3227 dmae->comp_val = 1;
a2fbb9ea
ET
3228 }
3229
bb2a0f7a
YG
3230 if (bp->func_stx) {
3231
3232 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3233 dmae->opcode = opcode;
3234 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3235 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3236 dmae->dst_addr_lo = bp->func_stx >> 2;
3237 dmae->dst_addr_hi = 0;
3238 dmae->len = sizeof(struct host_func_stats) >> 2;
3239 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3240 dmae->comp_addr_hi = 0;
3241 dmae->comp_val = 1;
a2fbb9ea
ET
3242 }
3243
bb2a0f7a 3244 /* MAC */
a2fbb9ea
ET
3245 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3246 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3247 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3248#ifdef __BIG_ENDIAN
3249 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3250#else
3251 DMAE_CMD_ENDIANITY_DW_SWAP |
3252#endif
bb2a0f7a
YG
3253 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3254 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3255
c18487ee 3256 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3257
3258 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3259 NIG_REG_INGRESS_BMAC0_MEM);
3260
3261 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3262 BIGMAC_REGISTER_TX_STAT_GTBYT */
3263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264 dmae->opcode = opcode;
3265 dmae->src_addr_lo = (mac_addr +
3266 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3267 dmae->src_addr_hi = 0;
3268 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3269 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3270 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3271 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3272 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3273 dmae->comp_addr_hi = 0;
3274 dmae->comp_val = 1;
3275
3276 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3277 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3278 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3279 dmae->opcode = opcode;
3280 dmae->src_addr_lo = (mac_addr +
3281 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3282 dmae->src_addr_hi = 0;
3283 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3284 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3285 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3286 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3287 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3288 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3289 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3290 dmae->comp_addr_hi = 0;
3291 dmae->comp_val = 1;
3292
c18487ee 3293 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3294
3295 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3296
3297 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3298 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3299 dmae->opcode = opcode;
3300 dmae->src_addr_lo = (mac_addr +
3301 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3302 dmae->src_addr_hi = 0;
3303 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3304 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3305 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3308 dmae->comp_val = 1;
3309
3310 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3311 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312 dmae->opcode = opcode;
3313 dmae->src_addr_lo = (mac_addr +
3314 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3315 dmae->src_addr_hi = 0;
3316 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3317 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3318 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3319 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3320 dmae->len = 1;
3321 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322 dmae->comp_addr_hi = 0;
3323 dmae->comp_val = 1;
3324
3325 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3326 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3327 dmae->opcode = opcode;
3328 dmae->src_addr_lo = (mac_addr +
3329 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3330 dmae->src_addr_hi = 0;
3331 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3332 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3333 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3334 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3335 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3338 dmae->comp_val = 1;
3339 }
3340
3341 /* NIG */
3342 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3343 dmae->opcode = opcode;
3344 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3345 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3346 dmae->src_addr_hi = 0;
3347 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3348 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3349 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3352 dmae->comp_val = 1;
3353
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3357 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3360 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3361 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3362 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3363 dmae->len = (2*sizeof(u32)) >> 2;
3364 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3365 dmae->comp_addr_hi = 0;
3366 dmae->comp_val = 1;
3367
3368 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3369 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3370 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3371 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3372#ifdef __BIG_ENDIAN
3373 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3374#else
3375 DMAE_CMD_ENDIANITY_DW_SWAP |
3376#endif
3377 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3378 (vn << DMAE_CMD_E1HVN_SHIFT));
3379 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3380 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3381 dmae->src_addr_hi = 0;
3382 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3384 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3386 dmae->len = (2*sizeof(u32)) >> 2;
3387 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3388 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3389 dmae->comp_val = DMAE_COMP_VAL;
3390
3391 *stats_comp = 0;
3392}
3393
bb2a0f7a 3394static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3395{
3396 struct dmae_command *dmae = &bp->stats_dmae;
3397 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3398
3399 /* sanity */
3400 if (!bp->func_stx) {
3401 BNX2X_ERR("BUG!\n");
3402 return;
3403 }
a2fbb9ea 3404
3405 bp->executer_idx = 0;
3406 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3407
3408 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3409 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3410 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3411#ifdef __BIG_ENDIAN
3412 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3413#else
3414 DMAE_CMD_ENDIANITY_DW_SWAP |
3415#endif
3416 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3417 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3418 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3419 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3420 dmae->dst_addr_lo = bp->func_stx >> 2;
3421 dmae->dst_addr_hi = 0;
3422 dmae->len = sizeof(struct host_func_stats) >> 2;
3423 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3424 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3425 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3426
3427 *stats_comp = 0;
3428}
a2fbb9ea 3429
3430static void bnx2x_stats_start(struct bnx2x *bp)
3431{
3432 if (bp->port.pmf)
3433 bnx2x_port_stats_init(bp);
3434
3435 else if (bp->func_stx)
3436 bnx2x_func_stats_init(bp);
3437
3438 bnx2x_hw_stats_post(bp);
3439 bnx2x_storm_stats_post(bp);
3440}
3441
3442static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3443{
3444 bnx2x_stats_comp(bp);
3445 bnx2x_stats_pmf_update(bp);
3446 bnx2x_stats_start(bp);
3447}
3448
3449static void bnx2x_stats_restart(struct bnx2x *bp)
3450{
3451 bnx2x_stats_comp(bp);
3452 bnx2x_stats_start(bp);
3453}
3454
3455static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3456{
3457 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3458 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3459 struct regpair diff;
3460
3461 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3462 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3463 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3464 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3465 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3466 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3467 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3468 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3469 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3470 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3471 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3472 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3473 UPDATE_STAT64(tx_stat_gt127,
3474 tx_stat_etherstatspkts65octetsto127octets);
3475 UPDATE_STAT64(tx_stat_gt255,
3476 tx_stat_etherstatspkts128octetsto255octets);
3477 UPDATE_STAT64(tx_stat_gt511,
3478 tx_stat_etherstatspkts256octetsto511octets);
3479 UPDATE_STAT64(tx_stat_gt1023,
3480 tx_stat_etherstatspkts512octetsto1023octets);
3481 UPDATE_STAT64(tx_stat_gt1518,
3482 tx_stat_etherstatspkts1024octetsto1522octets);
3483 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3484 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3485 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3486 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3487 UPDATE_STAT64(tx_stat_gterr,
3488 tx_stat_dot3statsinternalmactransmiterrors);
3489 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3490}
3491
3492static void bnx2x_emac_stats_update(struct bnx2x *bp)
3493{
3494 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3495 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3496
3497 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3498 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3499 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3500 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3501 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3502 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3503 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3504 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3505 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3506 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3507 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3508 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3509 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3510 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3511 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3512 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3513 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3514 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3517 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3518 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3519 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3524 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3525 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3527 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3528}
3529
3530static int bnx2x_hw_stats_update(struct bnx2x *bp)
3531{
3532 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3533 struct nig_stats *old = &(bp->port.old_nig_stats);
3534 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3535 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3536 struct regpair diff;
3537
3538 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3539 bnx2x_bmac_stats_update(bp);
3540
3541 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3542 bnx2x_emac_stats_update(bp);
3543
3544 else { /* unreached */
3545 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3546 return -1;
3547 }
a2fbb9ea 3548
3549 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3550 new->brb_discard - old->brb_discard);
3551 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3552 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3553
3554 UPDATE_STAT64_NIG(egress_mac_pkt0,
3555 etherstatspkts1024octetsto1522octets);
3556 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3557
bb2a0f7a 3558 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3559
3560 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3561 sizeof(struct mac_stx));
3562 estats->brb_drop_hi = pstats->brb_drop_hi;
3563 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3564
bb2a0f7a 3565 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3566
bb2a0f7a 3567 return 0;
3568}
3569
bb2a0f7a 3570static int bnx2x_storm_stats_update(struct bnx2x *bp)
3571{
3572 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3573 int cl_id = BP_CL_ID(bp);
3574 struct tstorm_per_port_stats *tport =
3575 &stats->tstorm_common.port_statistics;
a2fbb9ea 3576 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3577 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3578 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3579 struct xstorm_per_client_stats *xclient =
3580 &stats->xstorm_common.client_statistics[cl_id];
3581 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3582 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3583 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3584 u32 diff;
3585
3586 /* are storm stats valid? */
3587 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3588 bp->stats_counter) {
3589 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3590 " tstorm counter (%d) != stats_counter (%d)\n",
3591 tclient->stats_counter, bp->stats_counter);
3592 return -1;
3593 }
3594 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3595 bp->stats_counter) {
3596 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3597 " xstorm counter (%d) != stats_counter (%d)\n",
3598 xclient->stats_counter, bp->stats_counter);
3599 return -2;
3600 }
a2fbb9ea 3601
3602 fstats->total_bytes_received_hi =
3603 fstats->valid_bytes_received_hi =
a2fbb9ea 3604 le32_to_cpu(tclient->total_rcv_bytes.hi);
3605 fstats->total_bytes_received_lo =
3606 fstats->valid_bytes_received_lo =
a2fbb9ea 3607 le32_to_cpu(tclient->total_rcv_bytes.lo);
3608
3609 estats->error_bytes_received_hi =
3610 le32_to_cpu(tclient->rcv_error_bytes.hi);
3611 estats->error_bytes_received_lo =
3612 le32_to_cpu(tclient->rcv_error_bytes.lo);
3613 ADD_64(estats->error_bytes_received_hi,
3614 estats->rx_stat_ifhcinbadoctets_hi,
3615 estats->error_bytes_received_lo,
3616 estats->rx_stat_ifhcinbadoctets_lo);
3617
3618 ADD_64(fstats->total_bytes_received_hi,
3619 estats->error_bytes_received_hi,
3620 fstats->total_bytes_received_lo,
3621 estats->error_bytes_received_lo);
3622
3623 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3624 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3625 total_multicast_packets_received);
a2fbb9ea 3626 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3627 total_broadcast_packets_received);
3628
3629 fstats->total_bytes_transmitted_hi =
3630 le32_to_cpu(xclient->total_sent_bytes.hi);
3631 fstats->total_bytes_transmitted_lo =
3632 le32_to_cpu(xclient->total_sent_bytes.lo);
3633
3634 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3635 total_unicast_packets_transmitted);
3636 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3637 total_multicast_packets_transmitted);
3638 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3639 total_broadcast_packets_transmitted);
3640
3641 memcpy(estats, &(fstats->total_bytes_received_hi),
3642 sizeof(struct host_func_stats) - 2*sizeof(u32));
3643
3644 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3645 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3646 estats->brb_truncate_discard =
3647 le32_to_cpu(tport->brb_truncate_discard);
3648 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3649
3650 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3651 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3652 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3653 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3654 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3655 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3656 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3657 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3658 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3659 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3660 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3661 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3662 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3663
3664 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3665 old_tclient->packets_too_big_discard =
a2fbb9ea 3666 le32_to_cpu(tclient->packets_too_big_discard);
3667 estats->no_buff_discard =
3668 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3669 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3670
3671 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3672 old_xclient->unicast_bytes_sent.hi =
3673 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3674 old_xclient->unicast_bytes_sent.lo =
3675 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3676 old_xclient->multicast_bytes_sent.hi =
3677 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3678 old_xclient->multicast_bytes_sent.lo =
3679 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3680 old_xclient->broadcast_bytes_sent.hi =
3681 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3682 old_xclient->broadcast_bytes_sent.lo =
3683 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3684
3685 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3686
3687 return 0;
3688}
3689
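/* The validity checks at the top of bnx2x_storm_stats_update() rely on
 * modular 16-bit arithmetic: the storm firmware stamps each snapshot with
 * the counter of the request it answered, so a snapshot is current exactly
 * when (stamped counter + 1) == bp->stats_counter modulo 2^16.  The (u16)
 * cast is what keeps the comparison correct across rollover, e.g. (values
 * assumed for illustration only):
 *
 *	u16 stamped = 0xffff;			// firmware's last answer
 *	u16 expect = (u16)(stamped + 1);	// wraps cleanly to 0x0000
 */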
bb2a0f7a 3690static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3691{
3692 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3693 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3694 struct net_device_stats *nstats = &bp->dev->stats;
3695
3696 nstats->rx_packets =
3697 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3698 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3699 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3700
3701 nstats->tx_packets =
3702 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3703 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3704 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3705
bb2a0f7a 3706 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3707
0e39e645 3708 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3709
3710 nstats->rx_dropped = old_tclient->checksum_discard +
3711 estats->mac_discard;
3712 nstats->tx_dropped = 0;
3713
3714 nstats->multicast =
3715 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3716
3717 nstats->collisions =
3718 estats->tx_stat_dot3statssinglecollisionframes_lo +
3719 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3720 estats->tx_stat_dot3statslatecollisions_lo +
3721 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3722
3723 estats->jabber_packets_received =
3724 old_tclient->packets_too_big_discard +
3725 estats->rx_stat_dot3statsframestoolong_lo;
3726
3727 nstats->rx_length_errors =
3728 estats->rx_stat_etherstatsundersizepkts_lo +
3729 estats->jabber_packets_received;
66e855f3 3730 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3731 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3732 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3733 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3734 nstats->rx_missed_errors = estats->xxoverflow_discard;
3735
3736 nstats->rx_errors = nstats->rx_length_errors +
3737 nstats->rx_over_errors +
3738 nstats->rx_crc_errors +
3739 nstats->rx_frame_errors +
3740 nstats->rx_fifo_errors +
3741 nstats->rx_missed_errors;
a2fbb9ea 3742
3743 nstats->tx_aborted_errors =
3744 estats->tx_stat_dot3statslatecollisions_lo +
3745 estats->tx_stat_dot3statsexcessivecollisions_lo;
3746 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3747 nstats->tx_fifo_errors = 0;
3748 nstats->tx_heartbeat_errors = 0;
3749 nstats->tx_window_errors = 0;
3750
3751 nstats->tx_errors = nstats->tx_aborted_errors +
3752 nstats->tx_carrier_errors;
3753}
3754
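/* The *_hi/*_lo statistics pairs used above are 64-bit counters stored as
 * two consecutive u32 words (high word first), and bnx2x_hilo() - defined
 * earlier in this file - folds a pair back into one value.  Conceptually
 * (a sketch, assuming the _lo word directly follows the _hi word, which
 * is how the stats structures are laid out):
 *
 *	u64 packets = ((u64)hi << 32) | lo;
 */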
bb2a0f7a 3755static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3756{
3757 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3758 int update = 0;
a2fbb9ea 3759
3760 if (*stats_comp != DMAE_COMP_VAL)
3761 return;
3762
3763 if (bp->port.pmf)
3764 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3765
bb2a0f7a 3766 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3767
3768 if (update)
3769 bnx2x_net_stats_update(bp);
a2fbb9ea 3770
3771 else {
3772 if (bp->stats_pending) {
3773 bp->stats_pending++;
3774 if (bp->stats_pending == 3) {
3775 BNX2X_ERR("stats not updated for 3 consecutive polls\n");
3776 bnx2x_panic();
3777 return;
3778 }
3779 }
3780 }
3781
3782 if (bp->msglevel & NETIF_MSG_TIMER) {
3783 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3784 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3785 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3786 int i;
3787
3788 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3789 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3790 " tx pkt (%lx)\n",
3791 bnx2x_tx_avail(bp->fp),
7a9b2557 3792 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3793 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3794 " rx pkt (%lx)\n",
3795 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3796 bp->fp->rx_comp_cons),
3797 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3798 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3799 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3800 estats->driver_xoff, estats->brb_drop_lo);
3801 printk(KERN_DEBUG "tstats: checksum_discard %u "
3802 "packets_too_big_discard %u no_buff_discard %u "
3803 "mac_discard %u mac_filter_discard %u "
3804 "xxovrflow_discard %u brb_truncate_discard %u "
3805 "ttl0_discard %u\n",
3806 old_tclient->checksum_discard,
3807 old_tclient->packets_too_big_discard,
3808 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3809 estats->mac_filter_discard, estats->xxoverflow_discard,
3810 estats->brb_truncate_discard,
3811 old_tclient->ttl0_discard);
3812
3813 for_each_queue(bp, i) {
3814 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3815 bnx2x_fp(bp, i, tx_pkt),
3816 bnx2x_fp(bp, i, rx_pkt),
3817 bnx2x_fp(bp, i, rx_calls));
3818 }
3819 }
3820
3821 bnx2x_hw_stats_post(bp);
3822 bnx2x_storm_stats_post(bp);
3823}
a2fbb9ea 3824
3825static void bnx2x_port_stats_stop(struct bnx2x *bp)
3826{
3827 struct dmae_command *dmae;
3828 u32 opcode;
3829 int loader_idx = PMF_DMAE_C(bp);
3830 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3831
bb2a0f7a 3832 bp->executer_idx = 0;
a2fbb9ea 3833
3834 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3835 DMAE_CMD_C_ENABLE |
3836 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3837#ifdef __BIG_ENDIAN
bb2a0f7a 3838 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3839#else
bb2a0f7a 3840 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3841#endif
3842 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3843 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3844
3845 if (bp->port.port_stx) {
3846
3847 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3848 if (bp->func_stx)
3849 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3850 else
3851 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3852 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3853 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3854 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3855 dmae->dst_addr_hi = 0;
3856 dmae->len = sizeof(struct host_port_stats) >> 2;
3857 if (bp->func_stx) {
3858 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3859 dmae->comp_addr_hi = 0;
3860 dmae->comp_val = 1;
3861 } else {
3862 dmae->comp_addr_lo =
3863 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3864 dmae->comp_addr_hi =
3865 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3866 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3867
3868 *stats_comp = 0;
3869 }
3870 }
3871
3872 if (bp->func_stx) {
3873
3874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3875 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3876 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3877 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3878 dmae->dst_addr_lo = bp->func_stx >> 2;
3879 dmae->dst_addr_hi = 0;
3880 dmae->len = sizeof(struct host_func_stats) >> 2;
3881 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3882 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3883 dmae->comp_val = DMAE_COMP_VAL;
3884
3885 *stats_comp = 0;
a2fbb9ea 3886 }
3887}
3888
3889static void bnx2x_stats_stop(struct bnx2x *bp)
3890{
3891 int update = 0;
3892
3893 bnx2x_stats_comp(bp);
3894
3895 if (bp->port.pmf)
3896 update = (bnx2x_hw_stats_update(bp) == 0);
3897
3898 update |= (bnx2x_storm_stats_update(bp) == 0);
3899
3900 if (update) {
3901 bnx2x_net_stats_update(bp);
a2fbb9ea 3902
3903 if (bp->port.pmf)
3904 bnx2x_port_stats_stop(bp);
3905
3906 bnx2x_hw_stats_post(bp);
3907 bnx2x_stats_comp(bp);
3908 }
3909}
3910
3911static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3912{
3913}
3914
3915static const struct {
3916 void (*action)(struct bnx2x *bp);
3917 enum bnx2x_stats_state next_state;
3918} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3919/* state event */
3920{
3921/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3922/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3923/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3924/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3925},
3926{
3927/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3928/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3929/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3930/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3931}
3932};
3933
3934static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3935{
3936 enum bnx2x_stats_state state = bp->stats_state;
3937
3938 bnx2x_stats_stm[state][event].action(bp);
3939 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3940
3941 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3942 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3943 state, event, bp->stats_state);
3944}
3945
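/* bnx2x_stats_stm[][] is a classic table-driven state machine: the current
 * state and the incoming event index straight into an (action, next_state)
 * pair, so the dispatch in bnx2x_stats_handle() never changes when states
 * or events are added.  Callers simply raise events, as the timer does
 * below:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * (the PMF, LINK_UP and STOP events named in the table comments are raised
 * the same way) and the table decides whether the statistics machinery
 * starts, updates, stops or does nothing in the current state. */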
3946static void bnx2x_timer(unsigned long data)
3947{
3948 struct bnx2x *bp = (struct bnx2x *) data;
3949
3950 if (!netif_running(bp->dev))
3951 return;
3952
3953 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3954 goto timer_restart;
3955
3956 if (poll) {
3957 struct bnx2x_fastpath *fp = &bp->fp[0];
3958 int rc;
3959
3960 bnx2x_tx_int(fp, 1000);
3961 rc = bnx2x_rx_int(fp, 1000);
3962 }
3963
3964 if (!BP_NOMCP(bp)) {
3965 int func = BP_FUNC(bp);
3966 u32 drv_pulse;
3967 u32 mcp_pulse;
3968
3969 ++bp->fw_drv_pulse_wr_seq;
3970 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3971 /* TBD - add SYSTEM_TIME */
3972 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3973 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3974
34f80b04 3975 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3976 MCP_PULSE_SEQ_MASK);
3977 /* The delta between driver pulse and mcp response
3978 * should be 1 (before mcp response) or 0 (after mcp response)
3979 */
3980 if ((drv_pulse != mcp_pulse) &&
3981 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3982 /* someone lost a heartbeat... */
3983 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3984 drv_pulse, mcp_pulse);
3985 }
3986 }
3987
3988 if ((bp->state == BNX2X_STATE_OPEN) ||
3989 (bp->state == BNX2X_STATE_DISABLED))
3990 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3991
f1410647 3992timer_restart:
3993 mod_timer(&bp->timer, jiffies + bp->current_interval);
3994}
3995
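/* The pulse exchange in bnx2x_timer() is a mutual heartbeat between the
 * driver and the management firmware, compared modulo the sequence mask.
 * The acceptable window is drv_pulse == mcp_pulse (MCP already answered)
 * or drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK) (answer still
 * pending); with example values:
 *
 *	drv_pulse = 0x0005, mcp_pulse = 0x0005	-> OK
 *	drv_pulse = 0x0005, mcp_pulse = 0x0004	-> OK (MCP one behind)
 *	anything else				-> heartbeat lost, logged
 */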
3996/* end of Statistics */
3997
3998/* nic init */
3999
4000/*
4001 * nic init service functions
4002 */
4003
34f80b04 4004static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4005{
4006 int port = BP_PORT(bp);
4007
4008 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4009 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4010 sizeof(struct ustorm_status_block)/4);
4011 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4012 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4013 sizeof(struct cstorm_status_block)/4);
4014}
4015
4016static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4017 dma_addr_t mapping, int sb_id)
4018{
4019 int port = BP_PORT(bp);
bb2a0f7a 4020 int func = BP_FUNC(bp);
a2fbb9ea 4021 int index;
34f80b04 4022 u64 section;
4023
4024 /* USTORM */
4025 section = ((u64)mapping) + offsetof(struct host_status_block,
4026 u_status_block);
34f80b04 4027 sb->u_status_block.status_block_id = sb_id;
4028
4029 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4030 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4031 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4032 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4033 U64_HI(section));
4034 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4035 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4036
4037 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4038 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4039 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4040
4041 /* CSTORM */
4042 section = ((u64)mapping) + offsetof(struct host_status_block,
4043 c_status_block);
34f80b04 4044 sb->c_status_block.status_block_id = sb_id;
4045
4046 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4047 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4048 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4049 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4050 U64_HI(section));
4051 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4052 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4053
4054 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4055 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4056 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4057
4058 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4059}
4060
4061static void bnx2x_zero_def_sb(struct bnx2x *bp)
4062{
4063 int func = BP_FUNC(bp);
a2fbb9ea 4064
4065 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4066 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4067 sizeof(struct ustorm_def_status_block)/4);
4068 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4069 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4070 sizeof(struct cstorm_def_status_block)/4);
4071 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4072 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4073 sizeof(struct xstorm_def_status_block)/4);
4074 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4075 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4076 sizeof(struct tstorm_def_status_block)/4);
4077}
4078
4079static void bnx2x_init_def_sb(struct bnx2x *bp,
4080 struct host_def_status_block *def_sb,
34f80b04 4081 dma_addr_t mapping, int sb_id)
a2fbb9ea 4082{
4083 int port = BP_PORT(bp);
4084 int func = BP_FUNC(bp);
4085 int index, val, reg_offset;
4086 u64 section;
4087
4088 /* ATTN */
4089 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4090 atten_status_block);
34f80b04 4091 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4092
4093 bp->attn_state = 0;
4094
4095 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4096 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4097
34f80b04 4098 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4099 bp->attn_group[index].sig[0] = REG_RD(bp,
4100 reg_offset + 0x10*index);
4101 bp->attn_group[index].sig[1] = REG_RD(bp,
4102 reg_offset + 0x4 + 0x10*index);
4103 bp->attn_group[index].sig[2] = REG_RD(bp,
4104 reg_offset + 0x8 + 0x10*index);
4105 bp->attn_group[index].sig[3] = REG_RD(bp,
4106 reg_offset + 0xc + 0x10*index);
4107 }
4108
4109 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4110 HC_REG_ATTN_MSG0_ADDR_L);
4111
4112 REG_WR(bp, reg_offset, U64_LO(section));
4113 REG_WR(bp, reg_offset + 4, U64_HI(section));
4114
4115 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4116
4117 val = REG_RD(bp, reg_offset);
34f80b04 4118 val |= sb_id;
4119 REG_WR(bp, reg_offset, val);
4120
4121 /* USTORM */
4122 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4123 u_def_status_block);
34f80b04 4124 def_sb->u_def_status_block.status_block_id = sb_id;
4125
4126 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4127 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4128 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4129 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4130 U64_HI(section));
5c862848 4131 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4132 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4133
4134 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4135 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4136 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4137
4138 /* CSTORM */
4139 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4140 c_def_status_block);
34f80b04 4141 def_sb->c_def_status_block.status_block_id = sb_id;
4142
4143 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4144 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4145 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4146 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4147 U64_HI(section));
5c862848 4148 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4149 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4150
4151 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4152 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4153 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4154
4155 /* TSTORM */
4156 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4157 t_def_status_block);
34f80b04 4158 def_sb->t_def_status_block.status_block_id = sb_id;
4159
4160 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4161 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4162 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4163 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4164 U64_HI(section));
5c862848 4165 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4166 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4167
4168 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4169 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4170 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4171
4172 /* XSTORM */
4173 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4174 x_def_status_block);
34f80b04 4175 def_sb->x_def_status_block.status_block_id = sb_id;
4176
4177 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4178 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4179 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4180 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4181 U64_HI(section));
5c862848 4182 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4183 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4184
4185 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4186 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4187 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4188
bb2a0f7a 4189 bp->stats_pending = 0;
66e855f3 4190 bp->set_mac_pending = 0;
bb2a0f7a 4191
34f80b04 4192 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4193}
4194
4195static void bnx2x_update_coalesce(struct bnx2x *bp)
4196{
34f80b04 4197 int port = BP_PORT(bp);
4198 int i;
4199
4200 for_each_queue(bp, i) {
34f80b04 4201 int sb_id = bp->fp[i].sb_id;
4202
4203 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4204 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4205 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4206 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4207 bp->rx_ticks/12);
a2fbb9ea 4208 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4209 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210 U_SB_ETH_RX_CQ_INDEX),
4211 bp->rx_ticks ? 0 : 1);
4212 REG_WR16(bp, BAR_USTRORM_INTMEM +
4213 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4214 U_SB_ETH_RX_BD_INDEX),
34f80b04 4215 bp->rx_ticks ? 0 : 1);
4216
4217 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4218 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4219 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4220 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4221 bp->tx_ticks/12);
a2fbb9ea 4222 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4223 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4224 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4225 bp->tx_ticks ? 0 : 1);
4226 }
4227}
4228
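/* The rx_ticks/12 and tx_ticks/12 divisions above convert the user-visible
 * coalescing values into host-coalescing timeout units; the divisor
 * suggests the HC timer counts in 12-usec steps on this device (an
 * inference from the code, not from documentation).  For example, a
 * 48-usec setting programs a timeout of 48/12 = 4, while a value of 0
 * disables coalescing for that index via the *_HC_DISABLE_* writes. */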
4229static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4230 struct bnx2x_fastpath *fp, int last)
4231{
4232 int i;
4233
4234 for (i = 0; i < last; i++) {
4235 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4236 struct sk_buff *skb = rx_buf->skb;
4237
4238 if (skb == NULL) {
4239 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4240 continue;
4241 }
4242
4243 if (fp->tpa_state[i] == BNX2X_TPA_START)
4244 pci_unmap_single(bp->pdev,
4245 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4246 bp->rx_buf_size,
4247 PCI_DMA_FROMDEVICE);
4248
4249 dev_kfree_skb(skb);
4250 rx_buf->skb = NULL;
4251 }
4252}
4253
4254static void bnx2x_init_rx_rings(struct bnx2x *bp)
4255{
7a9b2557 4256 int func = BP_FUNC(bp);
4257 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4258 ETH_MAX_AGGREGATION_QUEUES_E1H;
4259 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4260 int i, j;
a2fbb9ea 4261
4262 bp->rx_buf_size = bp->dev->mtu;
4263 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4264 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4265
4266 if (bp->flags & TPA_ENABLE_FLAG) {
4267 DP(NETIF_MSG_IFUP,
4268 "rx_buf_size %d effective_mtu %d\n",
4269 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4270
4271 for_each_queue(bp, j) {
32626230 4272 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4273
32626230 4274 for (i = 0; i < max_agg_queues; i++) {
4275 fp->tpa_pool[i].skb =
4276 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4277 if (!fp->tpa_pool[i].skb) {
4278 BNX2X_ERR("Failed to allocate TPA "
4279 "skb pool for queue[%d] - "
4280 "disabling TPA on this "
4281 "queue!\n", j);
4282 bnx2x_free_tpa_pool(bp, fp, i);
4283 fp->disable_tpa = 1;
4284 break;
4285 }
4286 pci_unmap_addr_set((struct sw_rx_bd *)
4287 &bp->fp->tpa_pool[i],
4288 mapping, 0);
4289 fp->tpa_state[i] = BNX2X_TPA_STOP;
4290 }
4291 }
4292 }
4293
4294 for_each_queue(bp, j) {
4295 struct bnx2x_fastpath *fp = &bp->fp[j];
4296
4297 fp->rx_bd_cons = 0;
4298 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4299 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4300
4301 /* "next page" elements initialization */
4302 /* SGE ring */
4303 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4304 struct eth_rx_sge *sge;
4305
4306 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4307 sge->addr_hi =
4308 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4309 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4310 sge->addr_lo =
4311 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4312 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4313 }
4314
4315 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4316
7a9b2557 4317 /* RX BD ring */
4318 for (i = 1; i <= NUM_RX_RINGS; i++) {
4319 struct eth_rx_bd *rx_bd;
4320
4321 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4322 rx_bd->addr_hi =
4323 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4324 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4325 rx_bd->addr_lo =
4326 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4327 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4328 }
4329
34f80b04 4330 /* CQ ring */
4331 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4332 struct eth_rx_cqe_next_page *nextpg;
4333
4334 nextpg = (struct eth_rx_cqe_next_page *)
4335 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4336 nextpg->addr_hi =
4337 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4338 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4339 nextpg->addr_lo =
4340 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4341 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4342 }
4343
4344 /* Allocate SGEs and initialize the ring elements */
4345 for (i = 0, ring_prod = 0;
4346 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4347
4348 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4349 BNX2X_ERR("was only able to allocate "
4350 "%d rx sges\n", i);
4351 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4352 /* Cleanup already allocated elements */
4353 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4354 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4355 fp->disable_tpa = 1;
4356 ring_prod = 0;
4357 break;
4358 }
4359 ring_prod = NEXT_SGE_IDX(ring_prod);
4360 }
4361 fp->rx_sge_prod = ring_prod;
4362
4363 /* Allocate BDs and initialize BD ring */
66e855f3 4364 fp->rx_comp_cons = 0;
7a9b2557 4365 cqe_ring_prod = ring_prod = 0;
4366 for (i = 0; i < bp->rx_ring_size; i++) {
4367 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4368 BNX2X_ERR("was only able to allocate "
4369 "%d rx skbs\n", i);
66e855f3 4370 bp->eth_stats.rx_skb_alloc_failed++;
4371 break;
4372 }
4373 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4374 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4375 WARN_ON(ring_prod <= i);
4376 }
4377
4378 fp->rx_bd_prod = ring_prod;
4379 /* must not have more available CQEs than BDs */
4380 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4381 cqe_ring_prod);
4382 fp->rx_pkt = fp->rx_calls = 0;
4383
4384 /* Warning!
4385 * This will generate an interrupt (to the TSTORM)
4386 * and must only be done after the chip is initialized.
4387 */
4388 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4389 fp->rx_sge_prod);
4390 if (j != 0)
4391 continue;
4392
4393 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4394 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4395 U64_LO(fp->rx_comp_mapping));
4396 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4397 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4398 U64_HI(fp->rx_comp_mapping));
4399 }
4400}
4401
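/* The "next page" loops above chain each multi-page ring (SGE, RX BD, CQE)
 * into a circle: the reserved tail slots of every page (hence the
 * "CNT * i - 2" and "CNT * i - 1" indexing) hold the DMA address of the
 * following page, with the last page pointing back to the first via the
 * "% NUM_*_RINGS" arithmetic.  The hardware walks these link entries, so
 * both driver and chip can treat the scattered pages as one ring. */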
4402static void bnx2x_init_tx_ring(struct bnx2x *bp)
4403{
4404 int i, j;
4405
4406 for_each_queue(bp, j) {
4407 struct bnx2x_fastpath *fp = &bp->fp[j];
4408
4409 for (i = 1; i <= NUM_TX_RINGS; i++) {
4410 struct eth_tx_bd *tx_bd =
4411 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4412
4413 tx_bd->addr_hi =
4414 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4415 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4416 tx_bd->addr_lo =
4417 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4418 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4419 }
4420
4421 fp->tx_pkt_prod = 0;
4422 fp->tx_pkt_cons = 0;
4423 fp->tx_bd_prod = 0;
4424 fp->tx_bd_cons = 0;
4425 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4426 fp->tx_pkt = 0;
4427 }
4428}
4429
4430static void bnx2x_init_sp_ring(struct bnx2x *bp)
4431{
34f80b04 4432 int func = BP_FUNC(bp);
4433
4434 spin_lock_init(&bp->spq_lock);
4435
4436 bp->spq_left = MAX_SPQ_PENDING;
4437 bp->spq_prod_idx = 0;
4438 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4439 bp->spq_prod_bd = bp->spq;
4440 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4441
34f80b04 4442 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4443 U64_LO(bp->spq_mapping));
4444 REG_WR(bp,
4445 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4446 U64_HI(bp->spq_mapping));
4447
34f80b04 4448 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4449 bp->spq_prod_idx);
4450}
4451
4452static void bnx2x_init_context(struct bnx2x *bp)
4453{
4454 int i;
4455
4456 for_each_queue(bp, i) {
4457 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4458 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4459 u8 sb_id = FP_SB_ID(fp);
4460
4461 context->xstorm_st_context.tx_bd_page_base_hi =
4462 U64_HI(fp->tx_desc_mapping);
4463 context->xstorm_st_context.tx_bd_page_base_lo =
4464 U64_LO(fp->tx_desc_mapping);
4465 context->xstorm_st_context.db_data_addr_hi =
4466 U64_HI(fp->tx_prods_mapping);
4467 context->xstorm_st_context.db_data_addr_lo =
4468 U64_LO(fp->tx_prods_mapping);
4469 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4470 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4471
4472 context->ustorm_st_context.common.sb_index_numbers =
4473 BNX2X_RX_SB_INDEX_NUM;
4474 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4475 context->ustorm_st_context.common.status_block_id = sb_id;
4476 context->ustorm_st_context.common.flags =
4477 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4478 context->ustorm_st_context.common.mc_alignment_size =
4479 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4480 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4481 bp->rx_buf_size;
34f80b04 4482 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4483 U64_HI(fp->rx_desc_mapping);
34f80b04 4484 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4485 U64_LO(fp->rx_desc_mapping);
4486 if (!fp->disable_tpa) {
4487 context->ustorm_st_context.common.flags |=
4488 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4489 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4490 context->ustorm_st_context.common.sge_buff_size =
4491 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4492 context->ustorm_st_context.common.sge_page_base_hi =
4493 U64_HI(fp->rx_sge_mapping);
4494 context->ustorm_st_context.common.sge_page_base_lo =
4495 U64_LO(fp->rx_sge_mapping);
4496 }
4497
a2fbb9ea 4498 context->cstorm_st_context.sb_index_number =
5c862848 4499 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4500 context->cstorm_st_context.status_block_id = sb_id;
4501
4502 context->xstorm_ag_context.cdu_reserved =
4503 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4504 CDU_REGION_NUMBER_XCM_AG,
4505 ETH_CONNECTION_TYPE);
4506 context->ustorm_ag_context.cdu_usage =
4507 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4508 CDU_REGION_NUMBER_UCM_AG,
4509 ETH_CONNECTION_TYPE);
4510 }
4511}
4512
4513static void bnx2x_init_ind_table(struct bnx2x *bp)
4514{
34f80b04 4515 int port = BP_PORT(bp);
4516 int i;
4517
4518 if (!is_multi(bp))
4519 return;
4520
34f80b04 4521 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4522 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4523 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4524 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4525 i % bp->num_queues);
4526
4527 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4528}
4529
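/* The indirection table programmed above round-robins RSS hash buckets
 * over the active queues: entry i maps to queue (i % bp->num_queues).
 * With 4 queues, for instance, the table reads 0,1,2,3,0,1,2,3,... so
 * flows spread evenly across queues without any per-flow state in the
 * driver (the queue count here is just an example value). */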
4530static void bnx2x_set_client_config(struct bnx2x *bp)
4531{
49d66772 4532 struct tstorm_eth_client_config tstorm_client = {0};
4533 int port = BP_PORT(bp);
4534 int i;
49d66772 4535
34f80b04 4536 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4537 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4538 tstorm_client.config_flags =
4539 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4540#ifdef BCM_VLAN
34f80b04 4541 if (bp->rx_mode && bp->vlgrp) {
4542 tstorm_client.config_flags |=
4543 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4544 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4545 }
4546#endif
49d66772 4547
4548 if (bp->flags & TPA_ENABLE_FLAG) {
4549 tstorm_client.max_sges_for_packet =
4f40f2cb 4550 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4551 tstorm_client.max_sges_for_packet =
4552 ((tstorm_client.max_sges_for_packet +
4553 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4554 PAGES_PER_SGE_SHIFT;
4555
4556 tstorm_client.config_flags |=
4557 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4558 }
4559
4560 for_each_queue(bp, i) {
4561 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4562 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4563 ((u32 *)&tstorm_client)[0]);
4564 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4565 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4566 ((u32 *)&tstorm_client)[1]);
4567 }
4568
4569 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4570 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4571}
4572
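/* The max_sges_for_packet arithmetic above is a two-step round-up: the MTU
 * is first rounded up to whole SGE pages, and the page count is then
 * rounded up to whole SGE elements of PAGES_PER_SGE pages each.  A worked
 * example, assuming 4K pages and PAGES_PER_SGE == 2 (the actual values
 * depend on the build configuration):
 *
 *	mtu = 9000	-> SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT = 3 pages
 *	(3 + 2 - 1) & ~(2 - 1) = 4	-> 4 >> 1 = 2 SGE elements
 */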
4573static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4574{
a2fbb9ea 4575 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4576 int mode = bp->rx_mode;
4577 int mask = (1 << BP_L_ID(bp));
4578 int func = BP_FUNC(bp);
4579 int i;
4580
3196a88a 4581 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4582
4583 switch (mode) {
4584 case BNX2X_RX_MODE_NONE: /* no Rx */
4585 tstorm_mac_filter.ucast_drop_all = mask;
4586 tstorm_mac_filter.mcast_drop_all = mask;
4587 tstorm_mac_filter.bcast_drop_all = mask;
4588 break;
4589 case BNX2X_RX_MODE_NORMAL:
34f80b04 4590 tstorm_mac_filter.bcast_accept_all = mask;
4591 break;
4592 case BNX2X_RX_MODE_ALLMULTI:
4593 tstorm_mac_filter.mcast_accept_all = mask;
4594 tstorm_mac_filter.bcast_accept_all = mask;
4595 break;
4596 case BNX2X_RX_MODE_PROMISC:
4597 tstorm_mac_filter.ucast_accept_all = mask;
4598 tstorm_mac_filter.mcast_accept_all = mask;
4599 tstorm_mac_filter.bcast_accept_all = mask;
4600 break;
4601 default:
4602 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4603 break;
4604 }
4605
4606 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4607 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4608 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4609 ((u32 *)&tstorm_mac_filter)[i]);
4610
34f80b04 4611/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4612 ((u32 *)&tstorm_mac_filter)[i]); */
4613 }
a2fbb9ea 4614
4615 if (mode != BNX2X_RX_MODE_NONE)
4616 bnx2x_set_client_config(bp);
4617}
4618
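/* Note that the rx-mode filter above is per function: mask carries only
 * this function's bit, (1 << BP_L_ID(bp)), and every accept/drop field in
 * struct tstorm_eth_mac_filter_config is a bitmask over functions.
 * Promiscuous mode on one function therefore sets its own bit in the
 * three *_accept_all fields without disturbing other functions that
 * share the port. */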
4619static void bnx2x_init_internal_common(struct bnx2x *bp)
4620{
4621 int i;
4622
4623 if (bp->flags & TPA_ENABLE_FLAG) {
4624 struct tstorm_eth_tpa_exist tpa = {0};
4625
4626 tpa.tpa_exist = 1;
4627
4628 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4629 ((u32 *)&tpa)[0]);
4630 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4631 ((u32 *)&tpa)[1]);
4632 }
4633
4634 /* Zero this manually as its initialization is
4635 currently missing in the initTool */
4636 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4637 REG_WR(bp, BAR_USTRORM_INTMEM +
4638 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4639}
4640
4641static void bnx2x_init_internal_port(struct bnx2x *bp)
4642{
4643 int port = BP_PORT(bp);
4644
4645 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4646 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4647 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4648 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4649}
4650
4651static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4652{
4653 struct tstorm_eth_function_common_config tstorm_config = {0};
4654 struct stats_indication_flags stats_flags = {0};
4655 int port = BP_PORT(bp);
4656 int func = BP_FUNC(bp);
4657 int i;
471de716 4658 u16 max_agg_size;
4659
4660 if (is_multi(bp)) {
4661 tstorm_config.config_flags = MULTI_FLAGS;
4662 tstorm_config.rss_result_mask = MULTI_MASK;
4663 }
4664
4665 tstorm_config.leading_client_id = BP_L_ID(bp);
4666
a2fbb9ea 4667 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4668 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4669 (*(u32 *)&tstorm_config));
4670
c14423fe 4671 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4672 bnx2x_set_storm_rx_mode(bp);
4673
4674 /* reset xstorm per client statistics */
4675 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4676 REG_WR(bp, BAR_XSTRORM_INTMEM +
4677 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4678 i*4, 0);
4679 }
4680 /* reset tstorm per client statistics */
4681 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4682 REG_WR(bp, BAR_TSTRORM_INTMEM +
4683 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4684 i*4, 0);
4685 }
4686
4687 /* Init statistics related context */
34f80b04 4688 stats_flags.collect_eth = 1;
a2fbb9ea 4689
66e855f3 4690 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4691 ((u32 *)&stats_flags)[0]);
66e855f3 4692 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4693 ((u32 *)&stats_flags)[1]);
4694
66e855f3 4695 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4696 ((u32 *)&stats_flags)[0]);
66e855f3 4697 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4698 ((u32 *)&stats_flags)[1]);
4699
66e855f3 4700 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4701 ((u32 *)&stats_flags)[0]);
66e855f3 4702 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4703 ((u32 *)&stats_flags)[1]);
4704
4705 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4707 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4708 REG_WR(bp, BAR_XSTRORM_INTMEM +
4709 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4710 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4711
4712 REG_WR(bp, BAR_TSTRORM_INTMEM +
4713 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4714 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4715 REG_WR(bp, BAR_TSTRORM_INTMEM +
4716 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4717 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4718
4719 if (CHIP_IS_E1H(bp)) {
4720 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4721 IS_E1HMF(bp));
4722 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4723 IS_E1HMF(bp));
4724 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4725 IS_E1HMF(bp));
4726 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4727 IS_E1HMF(bp));
4728
4729 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4730 bp->e1hov);
4731 }
4732
4733 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4734 max_agg_size =
4735 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4736 SGE_PAGE_SIZE * PAGES_PER_SGE),
4737 (u32)0xffff);
4738 for_each_queue(bp, i) {
4739 struct bnx2x_fastpath *fp = &bp->fp[i];
4740
4741 REG_WR(bp, BAR_USTRORM_INTMEM +
4742 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4743 U64_LO(fp->rx_comp_mapping));
4744 REG_WR(bp, BAR_USTRORM_INTMEM +
4745 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4746 U64_HI(fp->rx_comp_mapping));
4747
4748 REG_WR16(bp, BAR_USTRORM_INTMEM +
4749 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4750 max_agg_size);
4751 }
4752}
4753
4754static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4755{
4756 switch (load_code) {
4757 case FW_MSG_CODE_DRV_LOAD_COMMON:
4758 bnx2x_init_internal_common(bp);
4759 /* no break */
4760
4761 case FW_MSG_CODE_DRV_LOAD_PORT:
4762 bnx2x_init_internal_port(bp);
4763 /* no break */
4764
4765 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4766 bnx2x_init_internal_func(bp);
4767 break;
4768
4769 default:
4770 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4771 break;
4772 }
4773}
4774
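/* The switch in bnx2x_init_internal() falls through deliberately (the
 * "no break" comments): a COMMON load performs common + port + function
 * init, a PORT load performs port + function init, and a FUNCTION load
 * only its own slice.  This matches the MCP load ordering, where exactly
 * one driver instance is answered with each of the wider load codes. */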
4775static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4776{
4777 int i;
4778
4779 for_each_queue(bp, i) {
4780 struct bnx2x_fastpath *fp = &bp->fp[i];
4781
34f80b04 4782 fp->bp = bp;
a2fbb9ea 4783 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4784 fp->index = i;
4785 fp->cl_id = BP_L_ID(bp) + i;
4786 fp->sb_id = fp->cl_id;
4787 DP(NETIF_MSG_IFUP,
4788 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4789 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4790 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4791 FP_SB_ID(fp));
4792 bnx2x_update_fpsb_idx(fp);
4793 }
4794
4795 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4796 DEF_SB_ID);
4797 bnx2x_update_dsb_idx(bp);
4798 bnx2x_update_coalesce(bp);
4799 bnx2x_init_rx_rings(bp);
4800 bnx2x_init_tx_ring(bp);
4801 bnx2x_init_sp_ring(bp);
4802 bnx2x_init_context(bp);
471de716 4803 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4804 bnx2x_init_ind_table(bp);
615f8fd9 4805 bnx2x_int_enable(bp);
4806}
4807
4808/* end of nic init */
4809
4810/*
4811 * gzip service functions
4812 */
4813
4814static int bnx2x_gunzip_init(struct bnx2x *bp)
4815{
4816 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4817 &bp->gunzip_mapping);
4818 if (bp->gunzip_buf == NULL)
4819 goto gunzip_nomem1;
4820
4821 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4822 if (bp->strm == NULL)
4823 goto gunzip_nomem2;
4824
4825 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4826 GFP_KERNEL);
4827 if (bp->strm->workspace == NULL)
4828 goto gunzip_nomem3;
4829
4830 return 0;
4831
4832gunzip_nomem3:
4833 kfree(bp->strm);
4834 bp->strm = NULL;
4835
4836gunzip_nomem2:
4837 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4838 bp->gunzip_mapping);
4839 bp->gunzip_buf = NULL;
4840
4841gunzip_nomem1:
4842 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4843 " decompression\n", bp->dev->name);
4844 return -ENOMEM;
4845}
4846
4847static void bnx2x_gunzip_end(struct bnx2x *bp)
4848{
4849 kfree(bp->strm->workspace);
4850
4851 kfree(bp->strm);
4852 bp->strm = NULL;
4853
4854 if (bp->gunzip_buf) {
4855 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4856 bp->gunzip_mapping);
4857 bp->gunzip_buf = NULL;
4858 }
4859}
4860
4861static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4862{
4863 int n, rc;
4864
4865 /* check gzip header */
4866 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4867 return -EINVAL;
4868
4869 n = 10;
4870
34f80b04 4871#define FNAME 0x8
4872
4873 if (zbuf[3] & FNAME)
4874 while ((zbuf[n++] != 0) && (n < len));
4875
4876 bp->strm->next_in = zbuf + n;
4877 bp->strm->avail_in = len - n;
4878 bp->strm->next_out = bp->gunzip_buf;
4879 bp->strm->avail_out = FW_BUF_SIZE;
4880
4881 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4882 if (rc != Z_OK)
4883 return rc;
4884
4885 rc = zlib_inflate(bp->strm, Z_FINISH);
4886 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4887 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4888 bp->dev->name, bp->strm->msg);
4889
4890 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4891 if (bp->gunzip_outlen & 0x3)
4892 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4893 " gunzip_outlen (%d) not aligned\n",
4894 bp->dev->name, bp->gunzip_outlen);
4895 bp->gunzip_outlen >>= 2;
4896
4897 zlib_inflateEnd(bp->strm);
4898
4899 if (rc == Z_STREAM_END)
4900 return 0;
4901
4902 return rc;
4903}
4904
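/* bnx2x_gunzip() parses the gzip framing by hand: the 10-byte fixed header
 * (magic 0x1f 0x8b, method, flags, mtime, xfl, os) is skipped via n = 10,
 * an optional FNAME field is skipped as a NUL-terminated string, and the
 * negative window size passed to zlib_inflateInit2() (-MAX_WBITS) tells
 * zlib to decode a raw deflate stream with no zlib header or checksum of
 * its own.  That is why no CRC check appears here - the trailing gzip
 * CRC is simply left unread. */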
4905/* nic load/unload */
4906
4907/*
34f80b04 4908 * General service functions
4909 */
4910
4911/* send a NIG loopback debug packet */
4912static void bnx2x_lb_pckt(struct bnx2x *bp)
4913{
a2fbb9ea 4914 u32 wb_write[3];
4915
4916 /* Ethernet source and destination addresses */
4917 wb_write[0] = 0x55555555;
4918 wb_write[1] = 0x55555555;
34f80b04 4919 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4920 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4921
4922 /* NON-IP protocol */
4923 wb_write[0] = 0x09000000;
4924 wb_write[1] = 0x55555555;
34f80b04 4925 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4926 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4927}
4928
4929/* Some of the internal memories
4930 * are not directly readable from the driver,
4931 * so to test them we send debug packets.
4932 */
4933static int bnx2x_int_mem_test(struct bnx2x *bp)
4934{
4935 int factor;
4936 int count, i;
4937 u32 val = 0;
4938
ad8d3948 4939 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4940 factor = 120;
4941 else if (CHIP_REV_IS_EMUL(bp))
4942 factor = 200;
4943 else
a2fbb9ea 4944 factor = 1;
4945
4946 DP(NETIF_MSG_HW, "start part1\n");
4947
4948 /* Disable inputs of parser neighbor blocks */
4949 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4950 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4951 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4952 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4953
4954 /* Write 0 to parser credits for CFC search request */
4955 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4956
4957 /* send Ethernet packet */
4958 bnx2x_lb_pckt(bp);
4959
4960 /* TODO: do we need to reset the NIG statistics? */
4961 /* Wait until NIG register shows 1 packet of size 0x10 */
4962 count = 1000 * factor;
4963 while (count) {
34f80b04 4964
4965 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4966 val = *bnx2x_sp(bp, wb_data[0]);
4967 if (val == 0x10)
4968 break;
4969
4970 msleep(10);
4971 count--;
4972 }
4973 if (val != 0x10) {
4974 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4975 return -1;
4976 }
4977
4978 /* Wait until PRS register shows 1 packet */
4979 count = 1000 * factor;
4980 while (count) {
4981 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4982 if (val == 1)
4983 break;
4984
4985 msleep(10);
4986 count--;
4987 }
4988 if (val != 0x1) {
4989 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4990 return -2;
4991 }
4992
4993 /* Reset and init BRB, PRS */
34f80b04 4994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4995 msleep(50);
34f80b04 4996 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4997 msleep(50);
4998 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4999 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5000
5001 DP(NETIF_MSG_HW, "part2\n");
5002
5003 /* Disable inputs of parser neighbor blocks */
5004 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5005 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5006 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5007 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5008
5009 /* Write 0 to parser credits for CFC search request */
5010 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5011
5012 /* send 10 Ethernet packets */
5013 for (i = 0; i < 10; i++)
5014 bnx2x_lb_pckt(bp);
5015
5016 /* Wait until NIG register shows 10 + 1
5017 packets of size 11*0x10 = 0xb0 */
5018 count = 1000 * factor;
5019 while (count) {
34f80b04 5020
5021 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5022 val = *bnx2x_sp(bp, wb_data[0]);
5023 if (val == 0xb0)
5024 break;
5025
5026 msleep(10);
5027 count--;
5028 }
5029 if (val != 0xb0) {
5030 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5031 return -3;
5032 }
5033
5034 /* Wait until PRS register shows 2 packets */
5035 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5036 if (val != 2)
5037 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5038
5039 /* Write 1 to parser credits for CFC search request */
5040 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5041
5042 /* Wait until PRS register shows 3 packets */
5043 msleep(10 * factor);
5044 /* Wait until NIG register shows 1 packet of size 0x10 */
5045 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5046 if (val != 3)
5047 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5048
5049 /* clear NIG EOP FIFO */
5050 for (i = 0; i < 11; i++)
5051 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5052 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5053 if (val != 1) {
5054 BNX2X_ERR("clear of NIG failed\n");
5055 return -4;
5056 }
5057
5058 /* Reset and init BRB, PRS, NIG */
5059 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5060 msleep(50);
5061 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5062 msleep(50);
5063 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5064 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5065#ifndef BCM_ISCSI
5066 /* set NIC mode */
5067 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5068#endif
5069
5070 /* Enable inputs of parser neighbor blocks */
5071 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5072 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5073 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5074 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5075
5076 DP(NETIF_MSG_HW, "done\n");
5077
5078 return 0; /* OK */
5079}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);	/* bits 3 and 4 masked */
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE, MTU 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
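	/* Credit arithmetic for the two writes above (the 16-byte-unit
	 * interpretation is an assumption; the numbers themselves come
	 * straight from the code):
	 *	threshold   = 9040/16            = 565
	 *	init credit = 9040/16 + 553 - 22 = 1096
	 * 9040 covers a 9000-byte jumbo frame plus header overhead.
	 */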

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked; only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked; bits 0-2 are in use as in SF,
	 *    bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a 1=valid bit
 * added at the 53rd bit; since this is a wide register(TM),
 * we split it into two 32-bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
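
/* Worked example (illustrative only; the address value is made up):
 * for a 64-bit DMA address  addr = 0x0000001234567000
 *	ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *	ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 * The low write carries address bits 12-43, the high write carries
 * bits 44-52 plus the valid bit (bit 20 of the high word, i.e. the
 * 53rd bit of the combined value, as the comment above describes).
 */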

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
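
/* Minimal usage sketch (illustrative only - it mirrors the LOAD_REQ call
 * made from bnx2x_nic_load() below, it is not an additional required call):
 *
 *	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code)
 *		return -EBUSY;	(no or garbled reply from the MCP)
 *	(then act on FW_MSG_CODE_DRV_LOAD_COMMON/_PORT/_FUNCTION)
 */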

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
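
/* Illustrative expansion (not real code - "bp->foo" is a made-up field
 * used only for this example): a single
 *	BNX2X_ALLOC(bp->foo, size);
 * statement behaves like
 *	bp->foo = vmalloc(size);
 *	if (bp->foo == NULL)
 *		goto alloc_mem_err;
 *	memset(bp->foo, 0, size);
 * so a failure anywhere in bnx2x_alloc_mem() unwinds through
 * bnx2x_free_mem(), whose free macros tolerate NULL entries.
 */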

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc -%d\n",
				  i + offset, -rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	if (netif_running(bp->dev)) {
		bnx2x_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
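
/* Byte-order example (illustrative only; the MAC address is made up):
 * for dev_addr = 00:11:22:33:44:55 on a little-endian host,
 *	*(u16 *)&dev_addr[0] = 0x1100, and swab16() yields 0x0011
 * so the three CAM halves become 0x0011, 0x2233 and 0x4455 - the
 * address bytes in wire order, two at a time. The same packing is
 * used by bnx2x_set_mac_addr_e1h() below.
 */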

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
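
/* Usage sketch (illustrative only - this is exactly the pattern used by
 * bnx2x_setup_multi() below): post a ramrod, then wait on the state
 * field that bnx2x_sp_event() flips when the completion arrives:
 *
 *	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
 *			       &(bp->fp[index].state), 0);
 */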

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Send LOAD_REQUEST command to MCP.
	   The reply tells us which type of LOAD to perform: if this is
	   the first port to be initialized, the common blocks must be
	   initialized as well, otherwise they must not. (An illustrative
	   walk-through follows the no-MCP branch below.)
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			return -EBUSY;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
			return -EBUSY; /* other port in diagnostic mode */

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
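	/* Illustrative walk-through of the no-MCP bookkeeping above: on a
	 * two-port board the first load gives load_count = {1, 1, 0} ->
	 * LOAD_COMMON, a load on the other port gives {2, 1, 1} ->
	 * LOAD_PORT, and any further load on an already-initialized port
	 * gives LOAD_FUNCTION.
	 */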

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* if we can't use MSI-X we only need one fp,
	 * so try to enable MSI-X with the requested number of fp's
	 * and fall back to INT#A with one fp
	 */
	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}
	DP(NETIF_MSG_IFUP,
	   "set number of queues to %d\n", bp->num_queues);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error;
		}
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed, aborting\n");
			goto load_error;
		}
	}

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_int_disable;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_rings_free;
		}
	}

	bnx2x_stats_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* Enable Rx interrupt handling before sending the ramrod
	   as it's completed on Rx FP queue */
	bnx2x_napi_enable(bp);

	/* Enable interrupt handling */
	atomic_set(&bp->intr_sem, 0);

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_netif_stop;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_netif_stop;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_wake_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_start_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		if (bp->flags & USING_MSIX_FLAG)
			printk(KERN_INFO PFX "%s: using MSI-X\n",
			       bp->dev->name);
		break;

	case LOAD_DIAG:
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_netif_stop:
	bnx2x_napi_disable(bp);
load_rings_free:
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_int_disable:
	bnx2x_int_disable_sync(bp, 1);
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error:
	bnx2x_free_mem(bp);
	bp->port.pmf = 0;

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
33471629 6657/* must be called with rtnl_lock */
34f80b04 6658static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6659{
da5a662a 6660 int port = BP_PORT(bp);
a2fbb9ea 6661 u32 reset_code = 0;
da5a662a 6662 int i, cnt, rc;
a2fbb9ea
ET
6663
6664 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6665
228241eb
ET
6666 bp->rx_mode = BNX2X_RX_MODE_NONE;
6667 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6668
f8ef6e44 6669 bnx2x_netif_stop(bp, 1);
65abd74d
YG
6670 if (!netif_running(bp->dev))
6671 bnx2x_napi_disable(bp);
34f80b04
EG
6672 del_timer_sync(&bp->timer);
6673 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6674 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6675 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6676
da5a662a 6677 /* Wait until tx fast path tasks complete */
228241eb
ET
6678 for_each_queue(bp, i) {
6679 struct bnx2x_fastpath *fp = &bp->fp[i];
6680
34f80b04
EG
6681 cnt = 1000;
6682 smp_rmb();
da5a662a
VZ
6683 while (BNX2X_HAS_TX_WORK(fp)) {
6684
65abd74d 6685 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6686 if (!cnt) {
6687 BNX2X_ERR("timeout waiting for queue[%d]\n",
6688 i);
6689#ifdef BNX2X_STOP_ON_ERROR
6690 bnx2x_panic();
6691 return -EBUSY;
6692#else
6693 break;
6694#endif
6695 }
6696 cnt--;
da5a662a 6697 msleep(1);
34f80b04
EG
6698 smp_rmb();
6699 }
228241eb 6700 }
da5a662a
VZ
6701 /* Give HW time to discard old tx messages */
6702 msleep(1);
a2fbb9ea 6703
34f80b04
EG
6704 /* Release IRQs */
6705 bnx2x_free_irq(bp);
6706
3101c2bc
YG
6707 if (CHIP_IS_E1(bp)) {
6708 struct mac_configuration_cmd *config =
6709 bnx2x_sp(bp, mcast_config);
6710
6711 bnx2x_set_mac_addr_e1(bp, 0);
6712
6713 for (i = 0; i < config->hdr.length_6b; i++)
6714 CAM_INVALIDATE(config->config_table[i]);
6715
6716 config->hdr.length_6b = i;
6717 if (CHIP_REV_IS_SLOW(bp))
6718 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6719 else
6720 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6721 config->hdr.client_id = BP_CL_ID(bp);
6722 config->hdr.reserved1 = 0;
6723
6724 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6725 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6726 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6727
6728 } else { /* E1H */
65abd74d
YG
6729 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6730
3101c2bc
YG
6731 bnx2x_set_mac_addr_e1h(bp, 0);
6732
6733 for (i = 0; i < MC_HASH_SIZE; i++)
6734 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6735 }
6736
65abd74d
YG
6737 if (unload_mode == UNLOAD_NORMAL)
6738 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739
6740 else if (bp->flags & NO_WOL_FLAG) {
6741 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6742 if (CHIP_IS_E1H(bp))
6743 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6744
6745 } else if (bp->wol) {
6746 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6747 u8 *mac_addr = bp->dev->dev_addr;
6748 u32 val;
6749 /* The mac address is written to entries 1-4 to
6750 preserve entry 0 which is used by the PMF */
6751 u8 entry = (BP_E1HVN(bp) + 1)*8;
6752
6753 val = (mac_addr[0] << 8) | mac_addr[1];
6754 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6755
6756 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6757 (mac_addr[4] << 8) | mac_addr[5];
6758 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6759
6760 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6761
6762 } else
6763 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6764
34f80b04
EG
6765 /* Close multi and leading connections
6766 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6767 for_each_nondefault_queue(bp, i)
6768 if (bnx2x_stop_multi(bp, i))
228241eb 6769 goto unload_error;
a2fbb9ea 6770
da5a662a
VZ
6771 rc = bnx2x_stop_leading(bp);
6772 if (rc) {
34f80b04 6773 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6774#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6775 return -EBUSY;
da5a662a
VZ
6776#else
6777 goto unload_error;
34f80b04 6778#endif
228241eb
ET
6779 }
6780
6781unload_error:
34f80b04 6782 if (!BP_NOMCP(bp))
228241eb 6783 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6784 else {
6785 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6786 load_count[0], load_count[1], load_count[2]);
6787 load_count[0]--;
da5a662a 6788 load_count[1 + port]--;
34f80b04
EG
6789 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6790 load_count[0], load_count[1], load_count[2]);
6791 if (load_count[0] == 0)
6792 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6793 else if (load_count[1 + port] == 0)
34f80b04
EG
6794 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6795 else
6796 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6797 }
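/* Illustrative walk-through (counts assumed): with both ports
 * loaded, load_count = {2, 1, 1}. Unloading port 0 yields
 * {1, 0, 1}; the common count is still non-zero but the port
 * count hit zero, so UNLOAD_PORT is chosen. Only the last
 * function to leave ends up reporting UNLOAD_COMMON.
 */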
a2fbb9ea 6798
34f80b04
EG
6799 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6800 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6801 bnx2x__link_reset(bp);
a2fbb9ea
ET
6802
6803 /* Reset the chip */
228241eb 6804 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6805
6806 /* Report UNLOAD_DONE to MCP */
34f80b04 6807 if (!BP_NOMCP(bp))
a2fbb9ea 6808 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6809 bp->port.pmf = 0;
a2fbb9ea 6810
7a9b2557 6811 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6812 bnx2x_free_skbs(bp);
7a9b2557 6813 for_each_queue(bp, i)
3196a88a 6814 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6815 bnx2x_free_mem(bp);
6816
6817 bp->state = BNX2X_STATE_CLOSED;
228241eb 6818
a2fbb9ea
ET
6819 netif_carrier_off(bp->dev);
6820
6821 return 0;
6822}
6823
34f80b04
EG
6824static void bnx2x_reset_task(struct work_struct *work)
6825{
6826 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6827
6828#ifdef BNX2X_STOP_ON_ERROR
 6829 BNX2X_ERR("reset task called but STOP_ON_ERROR is defined"
 6830 " so the reset is skipped to allow a debug dump;\n"
 6831 KERN_ERR " you will need to reboot when done\n");
6832 return;
6833#endif
6834
6835 rtnl_lock();
6836
6837 if (!netif_running(bp->dev))
6838 goto reset_task_exit;
6839
6840 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6841 bnx2x_nic_load(bp, LOAD_NORMAL);
6842
6843reset_task_exit:
6844 rtnl_unlock();
6845}
6846
a2fbb9ea
ET
6847/* end of nic load/unload */
6848
6849/* ethtool_ops */
6850
6851/*
6852 * Init service functions
6853 */
6854
34f80b04
EG
6855static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6856{
6857 u32 val;
6858
6859 /* Check if there is any driver already loaded */
6860 val = REG_RD(bp, MISC_REG_UNPREPARED);
6861 if (val == 0x1) {
6862 /* Check if it is the UNDI driver
6863 * UNDI driver initializes CID offset for normal bell to 0x7
6864 */
4a37fb66 6865 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 6866 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
76b190c5
EG
6867 if (val == 0x7)
6868 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6869 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6870
34f80b04
EG
6871 if (val == 0x7) {
6872 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6873 /* save our func */
34f80b04 6874 int func = BP_FUNC(bp);
da5a662a
VZ
6875 u32 swap_en;
6876 u32 swap_val;
34f80b04
EG
6877
 6878 BNX2X_DEV_INFO("UNDI is active! resetting device\n");
6879
 6880 /* try to unload UNDI on port 0 */
6881 bp->func = 0;
da5a662a
VZ
6882 bp->fw_seq =
6883 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6884 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6885 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6886
6887 /* if UNDI is loaded on the other port */
6888 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6889
da5a662a
VZ
6890 /* send "DONE" for previous unload */
6891 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6892
6893 /* unload UNDI on port 1 */
34f80b04 6894 bp->func = 1;
da5a662a
VZ
6895 bp->fw_seq =
6896 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6897 DRV_MSG_SEQ_NUMBER_MASK);
6898 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6899
6900 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6901 }
6902
da5a662a
VZ
6903 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6904 HC_REG_CONFIG_0), 0x1000);
6905
6906 /* close input traffic and wait for it */
6907 /* Do not rcv packets to BRB */
6908 REG_WR(bp,
6909 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6910 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6911 /* Do not direct rcv packets that are not for MCP to
6912 * the BRB */
6913 REG_WR(bp,
6914 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6915 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6916 /* clear AEU */
6917 REG_WR(bp,
6918 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6919 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6920 msleep(10);
6921
6922 /* save NIG port swap info */
6923 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6924 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6925 /* reset device */
6926 REG_WR(bp,
6927 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6928 0xd3ffffff);
34f80b04
EG
6929 REG_WR(bp,
6930 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6931 0x1403);
da5a662a
VZ
6932 /* take the NIG out of reset and restore swap values */
6933 REG_WR(bp,
6934 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6935 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6936 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6937 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6938
6939 /* send unload done to the MCP */
6940 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6941
6942 /* restore our func and fw_seq */
6943 bp->func = func;
6944 bp->fw_seq =
6945 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6946 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
6947 }
6948 }
6949}
6950
6951static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6952{
6953 u32 val, val2, val3, val4, id;
72ce58c3 6954 u16 pmc;
34f80b04
EG
6955
6956 /* Get the chip revision id and number. */
6957 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6958 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6959 id = ((val & 0xffff) << 16);
6960 val = REG_RD(bp, MISC_REG_CHIP_REV);
6961 id |= ((val & 0xf) << 12);
6962 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6963 id |= ((val & 0xff) << 4);
 6964 val = REG_RD(bp, MISC_REG_BOND_ID);
6965 id |= (val & 0xf);
6966 bp->common.chip_id = id;
6967 bp->link_params.chip_id = bp->common.chip_id;
6968 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
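/* Illustrative assembly (field values assumed): chip num 0x164e
 * with rev, metal and bond_id all zero yields id = 0x164e0000,
 * following the num:16-31, rev:12-15, metal:4-11, bond_id:0-3
 * layout noted above.
 */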
6969
6970 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6971 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6972 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6973 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6974 bp->common.flash_size, bp->common.flash_size);
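/* The CFG4 field is a shift count; assuming NVRAM_1MB_SIZE is one
 * megabit expressed in bytes (0x20000, per bnx2x.h), a field value
 * of 2 would report 0x80000 bytes (512 KB) of flash.
 */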
6975
6976 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6977 bp->link_params.shmem_base = bp->common.shmem_base;
6978 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6979
6980 if (!bp->common.shmem_base ||
6981 (bp->common.shmem_base < 0xA0000) ||
6982 (bp->common.shmem_base >= 0xC0000)) {
6983 BNX2X_DEV_INFO("MCP not active\n");
6984 bp->flags |= NO_MCP_FLAG;
6985 return;
6986 }
6987
6988 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6989 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6990 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6991 BNX2X_ERR("BAD MCP validity signature\n");
6992
6993 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6994 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6995
6996 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6997 bp->common.hw_config, bp->common.board);
6998
6999 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7000 SHARED_HW_CFG_LED_MODE_MASK) >>
7001 SHARED_HW_CFG_LED_MODE_SHIFT);
7002
7003 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7004 bp->common.bc_ver = val;
7005 BNX2X_DEV_INFO("bc_ver %X\n", val);
7006 if (val < BNX2X_BC_VER) {
 7007 /* for now only warn;
 7008 * later we might need to enforce this */
7009 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7010 " please upgrade BC\n", BNX2X_BC_VER, val);
7011 }
72ce58c3
EG
7012
7013 if (BP_E1HVN(bp) == 0) {
7014 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7015 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7016 } else {
7017 /* no WOL capability for E1HVN != 0 */
7018 bp->flags |= NO_WOL_FLAG;
7019 }
7020 BNX2X_DEV_INFO("%sWoL capable\n",
7021 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7022
7023 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7024 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7025 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7026 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7027
7028 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7029 val, val2, val3, val4);
7030}
7031
7032static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7033 u32 switch_cfg)
a2fbb9ea 7034{
34f80b04 7035 int port = BP_PORT(bp);
a2fbb9ea
ET
7036 u32 ext_phy_type;
7037
a2fbb9ea
ET
7038 switch (switch_cfg) {
7039 case SWITCH_CFG_1G:
7040 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7041
c18487ee
YR
7042 ext_phy_type =
7043 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7044 switch (ext_phy_type) {
7045 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7046 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7047 ext_phy_type);
7048
34f80b04
EG
7049 bp->port.supported |= (SUPPORTED_10baseT_Half |
7050 SUPPORTED_10baseT_Full |
7051 SUPPORTED_100baseT_Half |
7052 SUPPORTED_100baseT_Full |
7053 SUPPORTED_1000baseT_Full |
7054 SUPPORTED_2500baseX_Full |
7055 SUPPORTED_TP |
7056 SUPPORTED_FIBRE |
7057 SUPPORTED_Autoneg |
7058 SUPPORTED_Pause |
7059 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7060 break;
7061
7062 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7063 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7064 ext_phy_type);
7065
34f80b04
EG
7066 bp->port.supported |= (SUPPORTED_10baseT_Half |
7067 SUPPORTED_10baseT_Full |
7068 SUPPORTED_100baseT_Half |
7069 SUPPORTED_100baseT_Full |
7070 SUPPORTED_1000baseT_Full |
7071 SUPPORTED_TP |
7072 SUPPORTED_FIBRE |
7073 SUPPORTED_Autoneg |
7074 SUPPORTED_Pause |
7075 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7076 break;
7077
7078 default:
7079 BNX2X_ERR("NVRAM config error. "
7080 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7081 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7082 return;
7083 }
7084
34f80b04
EG
7085 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7086 port*0x10);
7087 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7088 break;
7089
7090 case SWITCH_CFG_10G:
7091 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7092
c18487ee
YR
7093 ext_phy_type =
7094 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7095 switch (ext_phy_type) {
7096 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7097 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7098 ext_phy_type);
7099
34f80b04
EG
7100 bp->port.supported |= (SUPPORTED_10baseT_Half |
7101 SUPPORTED_10baseT_Full |
7102 SUPPORTED_100baseT_Half |
7103 SUPPORTED_100baseT_Full |
7104 SUPPORTED_1000baseT_Full |
7105 SUPPORTED_2500baseX_Full |
7106 SUPPORTED_10000baseT_Full |
7107 SUPPORTED_TP |
7108 SUPPORTED_FIBRE |
7109 SUPPORTED_Autoneg |
7110 SUPPORTED_Pause |
7111 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7112 break;
7113
7114 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7115 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7116 ext_phy_type);
f1410647 7117
34f80b04
EG
7118 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7119 SUPPORTED_FIBRE |
7120 SUPPORTED_Pause |
7121 SUPPORTED_Asym_Pause);
f1410647
ET
7122 break;
7123
a2fbb9ea 7124 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7125 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7126 ext_phy_type);
7127
34f80b04
EG
7128 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7129 SUPPORTED_1000baseT_Full |
7130 SUPPORTED_FIBRE |
7131 SUPPORTED_Pause |
7132 SUPPORTED_Asym_Pause);
f1410647
ET
7133 break;
7134
7135 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7136 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7137 ext_phy_type);
7138
34f80b04
EG
7139 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7140 SUPPORTED_1000baseT_Full |
7141 SUPPORTED_FIBRE |
7142 SUPPORTED_Autoneg |
7143 SUPPORTED_Pause |
7144 SUPPORTED_Asym_Pause);
f1410647
ET
7145 break;
7146
c18487ee
YR
7147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7148 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7149 ext_phy_type);
7150
34f80b04
EG
7151 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7152 SUPPORTED_2500baseX_Full |
7153 SUPPORTED_1000baseT_Full |
7154 SUPPORTED_FIBRE |
7155 SUPPORTED_Autoneg |
7156 SUPPORTED_Pause |
7157 SUPPORTED_Asym_Pause);
c18487ee
YR
7158 break;
7159
f1410647
ET
7160 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7161 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7162 ext_phy_type);
7163
34f80b04
EG
7164 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7165 SUPPORTED_TP |
7166 SUPPORTED_Autoneg |
7167 SUPPORTED_Pause |
7168 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7169 break;
7170
c18487ee
YR
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7172 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7173 bp->link_params.ext_phy_config);
7174 break;
7175
a2fbb9ea
ET
7176 default:
7177 BNX2X_ERR("NVRAM config error. "
7178 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7179 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7180 return;
7181 }
7182
34f80b04
EG
7183 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7184 port*0x18);
7185 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7186
a2fbb9ea
ET
7187 break;
7188
7189 default:
7190 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7191 bp->port.link_config);
a2fbb9ea
ET
7192 return;
7193 }
34f80b04 7194 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7195
7196 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7197 if (!(bp->link_params.speed_cap_mask &
7198 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7199 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7200
c18487ee
YR
7201 if (!(bp->link_params.speed_cap_mask &
7202 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7203 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7204
c18487ee
YR
7205 if (!(bp->link_params.speed_cap_mask &
7206 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7207 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7208
c18487ee
YR
7209 if (!(bp->link_params.speed_cap_mask &
7210 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7211 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7212
c18487ee
YR
7213 if (!(bp->link_params.speed_cap_mask &
7214 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7215 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7216 SUPPORTED_1000baseT_Full);
a2fbb9ea 7217
c18487ee
YR
7218 if (!(bp->link_params.speed_cap_mask &
7219 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7220 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7221
c18487ee
YR
7222 if (!(bp->link_params.speed_cap_mask &
7223 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7224 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7225
34f80b04 7226 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7227}
7228
34f80b04 7229static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7230{
c18487ee 7231 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7232
34f80b04 7233 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7234 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7235 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7236 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7237 bp->port.advertising = bp->port.supported;
a2fbb9ea 7238 } else {
c18487ee
YR
7239 u32 ext_phy_type =
7240 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7241
7242 if ((ext_phy_type ==
7243 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7244 (ext_phy_type ==
7245 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7246 /* force 10G, no AN */
c18487ee 7247 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7248 bp->port.advertising =
a2fbb9ea
ET
7249 (ADVERTISED_10000baseT_Full |
7250 ADVERTISED_FIBRE);
7251 break;
7252 }
7253 BNX2X_ERR("NVRAM config error. "
7254 "Invalid link_config 0x%x"
7255 " Autoneg not supported\n",
34f80b04 7256 bp->port.link_config);
a2fbb9ea
ET
7257 return;
7258 }
7259 break;
7260
7261 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7262 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7263 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7264 bp->port.advertising = (ADVERTISED_10baseT_Full |
7265 ADVERTISED_TP);
a2fbb9ea
ET
7266 } else {
7267 BNX2X_ERR("NVRAM config error. "
7268 "Invalid link_config 0x%x"
7269 " speed_cap_mask 0x%x\n",
34f80b04 7270 bp->port.link_config,
c18487ee 7271 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7272 return;
7273 }
7274 break;
7275
7276 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7277 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7278 bp->link_params.req_line_speed = SPEED_10;
7279 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7280 bp->port.advertising = (ADVERTISED_10baseT_Half |
7281 ADVERTISED_TP);
a2fbb9ea
ET
7282 } else {
7283 BNX2X_ERR("NVRAM config error. "
7284 "Invalid link_config 0x%x"
7285 " speed_cap_mask 0x%x\n",
34f80b04 7286 bp->port.link_config,
c18487ee 7287 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7288 return;
7289 }
7290 break;
7291
7292 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7293 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7294 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7295 bp->port.advertising = (ADVERTISED_100baseT_Full |
7296 ADVERTISED_TP);
a2fbb9ea
ET
7297 } else {
7298 BNX2X_ERR("NVRAM config error. "
7299 "Invalid link_config 0x%x"
7300 " speed_cap_mask 0x%x\n",
34f80b04 7301 bp->port.link_config,
c18487ee 7302 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7303 return;
7304 }
7305 break;
7306
7307 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7308 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7309 bp->link_params.req_line_speed = SPEED_100;
7310 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7311 bp->port.advertising = (ADVERTISED_100baseT_Half |
7312 ADVERTISED_TP);
a2fbb9ea
ET
7313 } else {
7314 BNX2X_ERR("NVRAM config error. "
7315 "Invalid link_config 0x%x"
7316 " speed_cap_mask 0x%x\n",
34f80b04 7317 bp->port.link_config,
c18487ee 7318 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7319 return;
7320 }
7321 break;
7322
7323 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7324 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7325 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7326 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7327 ADVERTISED_TP);
a2fbb9ea
ET
7328 } else {
7329 BNX2X_ERR("NVRAM config error. "
7330 "Invalid link_config 0x%x"
7331 " speed_cap_mask 0x%x\n",
34f80b04 7332 bp->port.link_config,
c18487ee 7333 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7334 return;
7335 }
7336 break;
7337
7338 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7339 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7340 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7341 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7342 ADVERTISED_TP);
a2fbb9ea
ET
7343 } else {
7344 BNX2X_ERR("NVRAM config error. "
7345 "Invalid link_config 0x%x"
7346 " speed_cap_mask 0x%x\n",
34f80b04 7347 bp->port.link_config,
c18487ee 7348 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7349 return;
7350 }
7351 break;
7352
7353 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7354 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7355 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7356 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7357 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7358 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7359 ADVERTISED_FIBRE);
a2fbb9ea
ET
7360 } else {
7361 BNX2X_ERR("NVRAM config error. "
7362 "Invalid link_config 0x%x"
7363 " speed_cap_mask 0x%x\n",
34f80b04 7364 bp->port.link_config,
c18487ee 7365 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7366 return;
7367 }
7368 break;
7369
7370 default:
7371 BNX2X_ERR("NVRAM config error. "
7372 "BAD link speed link_config 0x%x\n",
34f80b04 7373 bp->port.link_config);
c18487ee 7374 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7375 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7376 break;
7377 }
a2fbb9ea 7378
34f80b04
EG
7379 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7380 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7381 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7382 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7383 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7384
c18487ee 7385 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7386 " advertising 0x%x\n",
c18487ee
YR
7387 bp->link_params.req_line_speed,
7388 bp->link_params.req_duplex,
34f80b04 7389 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7390}
7391
34f80b04 7392static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7393{
34f80b04
EG
7394 int port = BP_PORT(bp);
7395 u32 val, val2;
a2fbb9ea 7396
c18487ee 7397 bp->link_params.bp = bp;
34f80b04 7398 bp->link_params.port = port;
c18487ee 7399
c18487ee 7400 bp->link_params.serdes_config =
f1410647 7401 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7402 bp->link_params.lane_config =
a2fbb9ea 7403 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7404 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7405 SHMEM_RD(bp,
7406 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7407 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7408 SHMEM_RD(bp,
7409 dev_info.port_hw_config[port].speed_capability_mask);
7410
34f80b04 7411 bp->port.link_config =
a2fbb9ea
ET
7412 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7413
34f80b04
EG
7414 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7415 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7416 " link_config 0x%08x\n",
c18487ee
YR
7417 bp->link_params.serdes_config,
7418 bp->link_params.lane_config,
7419 bp->link_params.ext_phy_config,
34f80b04 7420 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7421
34f80b04 7422 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7423 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7424 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7425
7426 bnx2x_link_settings_requested(bp);
7427
7428 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7429 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7430 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7431 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7432 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7433 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7434 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7435 bp->dev->dev_addr[5] = (u8)(val & 0xff);
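/* Illustrative decode (values assumed): mac_upper = 0x0010 and
 * mac_lower = 0x18abcdef unpack to 00:10:18:ab:cd:ef; the upper
 * word supplies bytes 0-1 and the lower dword bytes 2-5.
 */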
c18487ee
YR
7436 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7437 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7438}
7439
7440static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7441{
7442 int func = BP_FUNC(bp);
7443 u32 val, val2;
7444 int rc = 0;
a2fbb9ea 7445
34f80b04 7446 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7447
34f80b04
EG
7448 bp->e1hov = 0;
7449 bp->e1hmf = 0;
7450 if (CHIP_IS_E1H(bp)) {
7451 bp->mf_config =
7452 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7453
3196a88a
EG
7454 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7455 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7456 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7457
34f80b04
EG
7458 bp->e1hov = val;
7459 bp->e1hmf = 1;
7460 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7461 "(0x%04x)\n",
7462 func, bp->e1hov, bp->e1hov);
7463 } else {
7464 BNX2X_DEV_INFO("Single function mode\n");
7465 if (BP_E1HVN(bp)) {
7466 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7467 " aborting\n", func);
7468 rc = -EPERM;
7469 }
7470 }
7471 }
a2fbb9ea 7472
34f80b04
EG
7473 if (!BP_NOMCP(bp)) {
7474 bnx2x_get_port_hwinfo(bp);
7475
7476 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7477 DRV_MSG_SEQ_NUMBER_MASK);
7478 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7479 }
7480
7481 if (IS_E1HMF(bp)) {
7482 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7483 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7484 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7485 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7486 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7487 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7488 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7489 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7490 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7491 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7492 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7493 ETH_ALEN);
7494 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7495 ETH_ALEN);
a2fbb9ea 7496 }
34f80b04
EG
7497
7498 return rc;
a2fbb9ea
ET
7499 }
7500
34f80b04
EG
7501 if (BP_NOMCP(bp)) {
7502 /* only supposed to happen on emulation/FPGA */
33471629 7503 BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
7504 random_ether_addr(bp->dev->dev_addr);
7505 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7506 }
a2fbb9ea 7507
34f80b04
EG
7508 return rc;
7509}
7510
7511static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7512{
7513 int func = BP_FUNC(bp);
7514 int rc;
7515
da5a662a
VZ
7516 /* Disable interrupt handling until HW is initialized */
7517 atomic_set(&bp->intr_sem, 1);
7518
34f80b04 7519 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7520
1cf167f2 7521 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7522 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7523
7524 rc = bnx2x_get_hwinfo(bp);
7525
7526 /* need to reset chip if undi was active */
7527 if (!BP_NOMCP(bp))
7528 bnx2x_undi_unload(bp);
7529
7530 if (CHIP_REV_IS_FPGA(bp))
7531 printk(KERN_ERR PFX "FPGA detected\n");
7532
7533 if (BP_NOMCP(bp) && (func == 0))
7534 printk(KERN_ERR PFX
7535 "MCP disabled, must load devices in order!\n");
7536
7a9b2557
VZ
7537 /* Set TPA flags */
7538 if (disable_tpa) {
7539 bp->flags &= ~TPA_ENABLE_FLAG;
7540 bp->dev->features &= ~NETIF_F_LRO;
7541 } else {
7542 bp->flags |= TPA_ENABLE_FLAG;
7543 bp->dev->features |= NETIF_F_LRO;
7544 }
7545
7546
34f80b04
EG
7547 bp->tx_ring_size = MAX_TX_AVAIL;
7548 bp->rx_ring_size = MAX_RX_AVAIL;
7549
7550 bp->rx_csum = 1;
7551 bp->rx_offset = 0;
7552
7553 bp->tx_ticks = 50;
7554 bp->rx_ticks = 25;
7555
34f80b04
EG
7556 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7557 bp->current_interval = (poll ? poll : bp->timer_interval);
7558
7559 init_timer(&bp->timer);
7560 bp->timer.expires = jiffies + bp->current_interval;
7561 bp->timer.data = (unsigned long) bp;
7562 bp->timer.function = bnx2x_timer;
7563
7564 return rc;
a2fbb9ea
ET
7565}
7566
7567/*
7568 * ethtool service functions
7569 */
7570
7571/* All ethtool functions called with rtnl_lock */
7572
7573static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7574{
7575 struct bnx2x *bp = netdev_priv(dev);
7576
34f80b04
EG
7577 cmd->supported = bp->port.supported;
7578 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7579
7580 if (netif_carrier_ok(dev)) {
c18487ee
YR
7581 cmd->speed = bp->link_vars.line_speed;
7582 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7583 } else {
c18487ee
YR
7584 cmd->speed = bp->link_params.req_line_speed;
7585 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7586 }
34f80b04
EG
7587 if (IS_E1HMF(bp)) {
7588 u16 vn_max_rate;
7589
7590 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7591 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7592 if (vn_max_rate < cmd->speed)
7593 cmd->speed = vn_max_rate;
7594 }
a2fbb9ea 7595
c18487ee
YR
7596 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7597 u32 ext_phy_type =
7598 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7599
7600 switch (ext_phy_type) {
7601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7603 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7604 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7606 cmd->port = PORT_FIBRE;
7607 break;
7608
7609 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7610 cmd->port = PORT_TP;
7611 break;
7612
c18487ee
YR
7613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7614 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7615 bp->link_params.ext_phy_config);
7616 break;
7617
f1410647
ET
7618 default:
7619 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7620 bp->link_params.ext_phy_config);
7621 break;
f1410647
ET
7622 }
7623 } else
a2fbb9ea 7624 cmd->port = PORT_TP;
a2fbb9ea 7625
34f80b04 7626 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7627 cmd->transceiver = XCVR_INTERNAL;
7628
c18487ee 7629 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7630 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7631 else
a2fbb9ea 7632 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7633
7634 cmd->maxtxpkt = 0;
7635 cmd->maxrxpkt = 0;
7636
7637 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7638 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7639 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7640 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7641 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7642 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7643 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7644
7645 return 0;
7646}
7647
7648static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7649{
7650 struct bnx2x *bp = netdev_priv(dev);
7651 u32 advertising;
7652
34f80b04
EG
7653 if (IS_E1HMF(bp))
7654 return 0;
7655
a2fbb9ea
ET
7656 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7657 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7658 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7659 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7660 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7661 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7662 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7663
a2fbb9ea 7664 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7665 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7666 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7667 return -EINVAL;
f1410647 7668 }
a2fbb9ea
ET
7669
7670 /* advertise the requested speed and duplex if supported */
34f80b04 7671 cmd->advertising &= bp->port.supported;
a2fbb9ea 7672
c18487ee
YR
7673 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7674 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7675 bp->port.advertising |= (ADVERTISED_Autoneg |
7676 cmd->advertising);
a2fbb9ea
ET
7677
7678 } else { /* forced speed */
7679 /* advertise the requested speed and duplex if supported */
7680 switch (cmd->speed) {
7681 case SPEED_10:
7682 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7683 if (!(bp->port.supported &
f1410647
ET
7684 SUPPORTED_10baseT_Full)) {
7685 DP(NETIF_MSG_LINK,
7686 "10M full not supported\n");
a2fbb9ea 7687 return -EINVAL;
f1410647 7688 }
a2fbb9ea
ET
7689
7690 advertising = (ADVERTISED_10baseT_Full |
7691 ADVERTISED_TP);
7692 } else {
34f80b04 7693 if (!(bp->port.supported &
f1410647
ET
7694 SUPPORTED_10baseT_Half)) {
7695 DP(NETIF_MSG_LINK,
7696 "10M half not supported\n");
a2fbb9ea 7697 return -EINVAL;
f1410647 7698 }
a2fbb9ea
ET
7699
7700 advertising = (ADVERTISED_10baseT_Half |
7701 ADVERTISED_TP);
7702 }
7703 break;
7704
7705 case SPEED_100:
7706 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7707 if (!(bp->port.supported &
f1410647
ET
7708 SUPPORTED_100baseT_Full)) {
7709 DP(NETIF_MSG_LINK,
7710 "100M full not supported\n");
a2fbb9ea 7711 return -EINVAL;
f1410647 7712 }
a2fbb9ea
ET
7713
7714 advertising = (ADVERTISED_100baseT_Full |
7715 ADVERTISED_TP);
7716 } else {
34f80b04 7717 if (!(bp->port.supported &
f1410647
ET
7718 SUPPORTED_100baseT_Half)) {
7719 DP(NETIF_MSG_LINK,
7720 "100M half not supported\n");
a2fbb9ea 7721 return -EINVAL;
f1410647 7722 }
a2fbb9ea
ET
7723
7724 advertising = (ADVERTISED_100baseT_Half |
7725 ADVERTISED_TP);
7726 }
7727 break;
7728
7729 case SPEED_1000:
f1410647
ET
7730 if (cmd->duplex != DUPLEX_FULL) {
7731 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7732 return -EINVAL;
f1410647 7733 }
a2fbb9ea 7734
34f80b04 7735 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7736 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7737 return -EINVAL;
f1410647 7738 }
a2fbb9ea
ET
7739
7740 advertising = (ADVERTISED_1000baseT_Full |
7741 ADVERTISED_TP);
7742 break;
7743
7744 case SPEED_2500:
f1410647
ET
7745 if (cmd->duplex != DUPLEX_FULL) {
7746 DP(NETIF_MSG_LINK,
7747 "2.5G half not supported\n");
a2fbb9ea 7748 return -EINVAL;
f1410647 7749 }
a2fbb9ea 7750
34f80b04 7751 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7752 DP(NETIF_MSG_LINK,
7753 "2.5G full not supported\n");
a2fbb9ea 7754 return -EINVAL;
f1410647 7755 }
a2fbb9ea 7756
f1410647 7757 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7758 ADVERTISED_TP);
7759 break;
7760
7761 case SPEED_10000:
f1410647
ET
7762 if (cmd->duplex != DUPLEX_FULL) {
7763 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7764 return -EINVAL;
f1410647 7765 }
a2fbb9ea 7766
34f80b04 7767 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7768 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7769 return -EINVAL;
f1410647 7770 }
a2fbb9ea
ET
7771
7772 advertising = (ADVERTISED_10000baseT_Full |
7773 ADVERTISED_FIBRE);
7774 break;
7775
7776 default:
f1410647 7777 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7778 return -EINVAL;
7779 }
7780
c18487ee
YR
7781 bp->link_params.req_line_speed = cmd->speed;
7782 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7783 bp->port.advertising = advertising;
a2fbb9ea
ET
7784 }
7785
c18487ee 7786 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7787 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7788 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7789 bp->port.advertising);
a2fbb9ea 7790
34f80b04 7791 if (netif_running(dev)) {
bb2a0f7a 7792 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7793 bnx2x_link_set(bp);
7794 }
a2fbb9ea
ET
7795
7796 return 0;
7797}
7798
c18487ee
YR
7799#define PHY_FW_VER_LEN 10
7800
a2fbb9ea
ET
7801static void bnx2x_get_drvinfo(struct net_device *dev,
7802 struct ethtool_drvinfo *info)
7803{
7804 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7805 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7806
7807 strcpy(info->driver, DRV_MODULE_NAME);
7808 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7809
7810 phy_fw_ver[0] = '\0';
34f80b04 7811 if (bp->port.pmf) {
4a37fb66 7812 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7813 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7814 (bp->state != BNX2X_STATE_CLOSED),
7815 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7816 bnx2x_release_phy_lock(bp);
34f80b04 7817 }
c18487ee 7818
f0e53a84
EG
7819 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7820 (bp->common.bc_ver & 0xff0000) >> 16,
7821 (bp->common.bc_ver & 0xff00) >> 8,
7822 (bp->common.bc_ver & 0xff),
7823 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7824 strcpy(info->bus_info, pci_name(bp->pdev));
7825 info->n_stats = BNX2X_NUM_STATS;
7826 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7827 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7828 info->regdump_len = 0;
7829}
7830
7831static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7832{
7833 struct bnx2x *bp = netdev_priv(dev);
7834
7835 if (bp->flags & NO_WOL_FLAG) {
7836 wol->supported = 0;
7837 wol->wolopts = 0;
7838 } else {
7839 wol->supported = WAKE_MAGIC;
7840 if (bp->wol)
7841 wol->wolopts = WAKE_MAGIC;
7842 else
7843 wol->wolopts = 0;
7844 }
7845 memset(&wol->sopass, 0, sizeof(wol->sopass));
7846}
7847
7848static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7849{
7850 struct bnx2x *bp = netdev_priv(dev);
7851
7852 if (wol->wolopts & ~WAKE_MAGIC)
7853 return -EINVAL;
7854
7855 if (wol->wolopts & WAKE_MAGIC) {
7856 if (bp->flags & NO_WOL_FLAG)
7857 return -EINVAL;
7858
7859 bp->wol = 1;
34f80b04 7860 } else
a2fbb9ea 7861 bp->wol = 0;
34f80b04 7862
a2fbb9ea
ET
7863 return 0;
7864}
7865
7866static u32 bnx2x_get_msglevel(struct net_device *dev)
7867{
7868 struct bnx2x *bp = netdev_priv(dev);
7869
7870 return bp->msglevel;
7871}
7872
7873static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7874{
7875 struct bnx2x *bp = netdev_priv(dev);
7876
7877 if (capable(CAP_NET_ADMIN))
7878 bp->msglevel = level;
7879}
7880
7881static int bnx2x_nway_reset(struct net_device *dev)
7882{
7883 struct bnx2x *bp = netdev_priv(dev);
7884
34f80b04
EG
7885 if (!bp->port.pmf)
7886 return 0;
a2fbb9ea 7887
34f80b04 7888 if (netif_running(dev)) {
bb2a0f7a 7889 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7890 bnx2x_link_set(bp);
7891 }
a2fbb9ea
ET
7892
7893 return 0;
7894}
7895
7896static int bnx2x_get_eeprom_len(struct net_device *dev)
7897{
7898 struct bnx2x *bp = netdev_priv(dev);
7899
34f80b04 7900 return bp->common.flash_size;
a2fbb9ea
ET
7901}
7902
7903static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7904{
34f80b04 7905 int port = BP_PORT(bp);
a2fbb9ea
ET
7906 int count, i;
7907 u32 val = 0;
7908
7909 /* adjust timeout for emulation/FPGA */
7910 count = NVRAM_TIMEOUT_COUNT;
7911 if (CHIP_REV_IS_SLOW(bp))
7912 count *= 100;
7913
7914 /* request access to nvram interface */
7915 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7916 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7917
7918 for (i = 0; i < count*10; i++) {
7919 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7920 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7921 break;
7922
7923 udelay(5);
7924 }
7925
7926 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7927 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7928 return -EBUSY;
7929 }
7930
7931 return 0;
7932}
7933
7934static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7935{
34f80b04 7936 int port = BP_PORT(bp);
a2fbb9ea
ET
7937 int count, i;
7938 u32 val = 0;
7939
7940 /* adjust timeout for emulation/FPGA */
7941 count = NVRAM_TIMEOUT_COUNT;
7942 if (CHIP_REV_IS_SLOW(bp))
7943 count *= 100;
7944
7945 /* relinquish nvram interface */
7946 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7947 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7948
7949 for (i = 0; i < count*10; i++) {
7950 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7951 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7952 break;
7953
7954 udelay(5);
7955 }
7956
7957 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7958 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7959 return -EBUSY;
7960 }
7961
7962 return 0;
7963}
7964
7965static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7966{
7967 u32 val;
7968
7969 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7970
7971 /* enable both bits, even on read */
7972 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7973 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7974 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7975}
7976
7977static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7978{
7979 u32 val;
7980
7981 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7982
7983 /* disable both bits, even after read */
7984 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7985 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7986 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7987}
7988
7989static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7990 u32 cmd_flags)
7991{
f1410647 7992 int count, i, rc;
a2fbb9ea
ET
7993 u32 val;
7994
7995 /* build the command word */
7996 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7997
7998 /* need to clear DONE bit separately */
7999 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8000
8001 /* address of the NVRAM to read from */
8002 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8003 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8004
8005 /* issue a read command */
8006 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8007
8008 /* adjust timeout for emulation/FPGA */
8009 count = NVRAM_TIMEOUT_COUNT;
8010 if (CHIP_REV_IS_SLOW(bp))
8011 count *= 100;
8012
8013 /* wait for completion */
8014 *ret_val = 0;
8015 rc = -EBUSY;
8016 for (i = 0; i < count; i++) {
8017 udelay(5);
8018 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8019
8020 if (val & MCPR_NVM_COMMAND_DONE) {
8021 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 8022 /* we read NVRAM data in CPU order,
 8023 * but ethtool sees it as an array of bytes;
 8024 * converting to big-endian does the work */
8025 val = cpu_to_be32(val);
8026 *ret_val = val;
8027 rc = 0;
8028 break;
8029 }
8030 }
8031
8032 return rc;
8033}
8034
8035static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8036 int buf_size)
8037{
8038 int rc;
8039 u32 cmd_flags;
8040 u32 val;
8041
8042 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8043 DP(BNX2X_MSG_NVM,
c14423fe 8044 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8045 offset, buf_size);
8046 return -EINVAL;
8047 }
8048
34f80b04
EG
8049 if (offset + buf_size > bp->common.flash_size) {
8050 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8051 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8052 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8053 return -EINVAL;
8054 }
8055
8056 /* request access to nvram interface */
8057 rc = bnx2x_acquire_nvram_lock(bp);
8058 if (rc)
8059 return rc;
8060
8061 /* enable access to nvram interface */
8062 bnx2x_enable_nvram_access(bp);
8063
8064 /* read the first word(s) */
8065 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8066 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8067 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8068 memcpy(ret_buf, &val, 4);
8069
8070 /* advance to the next dword */
8071 offset += sizeof(u32);
8072 ret_buf += sizeof(u32);
8073 buf_size -= sizeof(u32);
8074 cmd_flags = 0;
8075 }
8076
8077 if (rc == 0) {
8078 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8079 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8080 memcpy(ret_buf, &val, 4);
8081 }
8082
8083 /* disable access to nvram interface */
8084 bnx2x_disable_nvram_access(bp);
8085 bnx2x_release_nvram_lock(bp);
8086
8087 return rc;
8088}
8089
8090static int bnx2x_get_eeprom(struct net_device *dev,
8091 struct ethtool_eeprom *eeprom, u8 *eebuf)
8092{
8093 struct bnx2x *bp = netdev_priv(dev);
8094 int rc;
8095
34f80b04 8096 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8097 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8098 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8099 eeprom->len, eeprom->len);
8100
8101 /* parameters already validated in ethtool_get_eeprom */
8102
8103 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8104
8105 return rc;
8106}
8107
8108static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8109 u32 cmd_flags)
8110{
f1410647 8111 int count, i, rc;
a2fbb9ea
ET
8112
8113 /* build the command word */
8114 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8115
8116 /* need to clear DONE bit separately */
8117 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8118
8119 /* write the data */
8120 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8121
8122 /* address of the NVRAM to write to */
8123 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8124 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8125
8126 /* issue the write command */
8127 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8128
8129 /* adjust timeout for emulation/FPGA */
8130 count = NVRAM_TIMEOUT_COUNT;
8131 if (CHIP_REV_IS_SLOW(bp))
8132 count *= 100;
8133
8134 /* wait for completion */
8135 rc = -EBUSY;
8136 for (i = 0; i < count; i++) {
8137 udelay(5);
8138 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8139 if (val & MCPR_NVM_COMMAND_DONE) {
8140 rc = 0;
8141 break;
8142 }
8143 }
8144
8145 return rc;
8146}
8147
f1410647 8148#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8149
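/* Example (illustrative): BYTE_OFFSET(0x1022) = 8 * (0x1022 & 3)
 * = 16, so the single-byte write path below masks and replaces
 * bits 23:16 of the dword read from the aligned offset 0x1020.
 */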
8150static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8151 int buf_size)
8152{
8153 int rc;
8154 u32 cmd_flags;
8155 u32 align_offset;
8156 u32 val;
8157
34f80b04
EG
8158 if (offset + buf_size > bp->common.flash_size) {
8159 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8160 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8161 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8162 return -EINVAL;
8163 }
8164
8165 /* request access to nvram interface */
8166 rc = bnx2x_acquire_nvram_lock(bp);
8167 if (rc)
8168 return rc;
8169
8170 /* enable access to nvram interface */
8171 bnx2x_enable_nvram_access(bp);
8172
8173 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8174 align_offset = (offset & ~0x03);
8175 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8176
8177 if (rc == 0) {
8178 val &= ~(0xff << BYTE_OFFSET(offset));
8179 val |= (*data_buf << BYTE_OFFSET(offset));
8180
 8181 /* NVRAM data is returned as an array of bytes;
 8182 * convert it back to CPU order */
8183 val = be32_to_cpu(val);
8184
a2fbb9ea
ET
8185 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8186 cmd_flags);
8187 }
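/* Read-modify-write in one burst: the aligned dword is fetched
 * with FIRST|LAST, a single byte lane is replaced, the endianness
 * conversion is undone and the dword is written back. Only a
 * buf_size of 1 reaches this helper (see bnx2x_nvram_write).
 */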
8188
8189 /* disable access to nvram interface */
8190 bnx2x_disable_nvram_access(bp);
8191 bnx2x_release_nvram_lock(bp);
8192
8193 return rc;
8194}
8195
8196static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8197 int buf_size)
8198{
8199 int rc;
8200 u32 cmd_flags;
8201 u32 val;
8202 u32 written_so_far;
8203
34f80b04 8204 if (buf_size == 1) /* ethtool */
a2fbb9ea 8205 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8206
8207 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8208 DP(BNX2X_MSG_NVM,
c14423fe 8209 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8210 offset, buf_size);
8211 return -EINVAL;
8212 }
8213
34f80b04
EG
8214 if (offset + buf_size > bp->common.flash_size) {
8215 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8216 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8217 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8218 return -EINVAL;
8219 }
8220
8221 /* request access to nvram interface */
8222 rc = bnx2x_acquire_nvram_lock(bp);
8223 if (rc)
8224 return rc;
8225
8226 /* enable access to nvram interface */
8227 bnx2x_enable_nvram_access(bp);
8228
8229 written_so_far = 0;
8230 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8231 while ((written_so_far < buf_size) && (rc == 0)) {
8232 if (written_so_far == (buf_size - sizeof(u32)))
8233 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8234 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8235 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8236 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8237 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8238
8239 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8240
8241 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8242
8243 /* advance to the next dword */
8244 offset += sizeof(u32);
8245 data_buf += sizeof(u32);
8246 written_so_far += sizeof(u32);
8247 cmd_flags = 0;
8248 }
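/* Flagging scheme of the loop above: LAST is set on the final
 * dword of the buffer and on the last dword of each NVRAM page,
 * FIRST on the first dword of each page, so a buffer spanning
 * pages becomes a series of per-page FIRST..LAST bursts
 * (NVRAM_PAGE_SIZE comes from bnx2x.h; 256 bytes is assumed in
 * this description).
 */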
8249
8250 /* disable access to nvram interface */
8251 bnx2x_disable_nvram_access(bp);
8252 bnx2x_release_nvram_lock(bp);
8253
8254 return rc;
8255}
8256
8257static int bnx2x_set_eeprom(struct net_device *dev,
8258 struct ethtool_eeprom *eeprom, u8 *eebuf)
8259{
8260 struct bnx2x *bp = netdev_priv(dev);
8261 int rc;
8262
9f4c9583
EG
8263 if (!netif_running(dev))
8264 return -EAGAIN;
8265
34f80b04 8266 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8267 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8268 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8269 eeprom->len, eeprom->len);
8270
8271 /* parameters already validated in ethtool_set_eeprom */
8272
c18487ee 8273 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8274 if (eeprom->magic == 0x00504859)
8275 if (bp->port.pmf) {
8276
4a37fb66 8277 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8278 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8279 bp->link_params.ext_phy_config,
8280 (bp->state != BNX2X_STATE_CLOSED),
8281 eebuf, eeprom->len);
bb2a0f7a
YG
8282 if ((bp->state == BNX2X_STATE_OPEN) ||
8283 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8284 rc |= bnx2x_link_reset(&bp->link_params,
8285 &bp->link_vars);
8286 rc |= bnx2x_phy_init(&bp->link_params,
8287 &bp->link_vars);
bb2a0f7a 8288 }
4a37fb66 8289 bnx2x_release_phy_lock(bp);
34f80b04
EG
8290
8291 } else /* Only the PMF can access the PHY */
8292 return -EINVAL;
8293 else
c18487ee 8294 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8295
8296 return rc;
8297}
8298
8299static int bnx2x_get_coalesce(struct net_device *dev,
8300 struct ethtool_coalesce *coal)
8301{
8302 struct bnx2x *bp = netdev_priv(dev);
8303
8304 memset(coal, 0, sizeof(struct ethtool_coalesce));
8305
8306 coal->rx_coalesce_usecs = bp->rx_ticks;
8307 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8308
8309 return 0;
8310}
8311
8312static int bnx2x_set_coalesce(struct net_device *dev,
8313 struct ethtool_coalesce *coal)
8314{
8315 struct bnx2x *bp = netdev_priv(dev);
8316
8317 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8318 if (bp->rx_ticks > 3000)
8319 bp->rx_ticks = 3000;
8320
8321 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8322 if (bp->tx_ticks > 0x3000)
8323 bp->tx_ticks = 0x3000;
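/* Note the asymmetric bounds: rx_ticks is clamped to decimal 3000
 * usec while tx_ticks is clamped to 0x3000 (12288) usec; the hex
 * literal reads like a typo for 3000.
 */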
8324
34f80b04 8325 if (netif_running(dev))
a2fbb9ea
ET
8326 bnx2x_update_coalesce(bp);
8327
8328 return 0;
8329}
8330
8331static void bnx2x_get_ringparam(struct net_device *dev,
8332 struct ethtool_ringparam *ering)
8333{
8334 struct bnx2x *bp = netdev_priv(dev);
8335
8336 ering->rx_max_pending = MAX_RX_AVAIL;
8337 ering->rx_mini_max_pending = 0;
8338 ering->rx_jumbo_max_pending = 0;
8339
8340 ering->rx_pending = bp->rx_ring_size;
8341 ering->rx_mini_pending = 0;
8342 ering->rx_jumbo_pending = 0;
8343
8344 ering->tx_max_pending = MAX_TX_AVAIL;
8345 ering->tx_pending = bp->tx_ring_size;
8346}
8347
8348static int bnx2x_set_ringparam(struct net_device *dev,
8349 struct ethtool_ringparam *ering)
8350{
8351 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8352 int rc = 0;
a2fbb9ea
ET
8353
8354 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8355 (ering->tx_pending > MAX_TX_AVAIL) ||
8356 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8357 return -EINVAL;
8358
8359 bp->rx_ring_size = ering->rx_pending;
8360 bp->tx_ring_size = ering->tx_pending;
8361
34f80b04
EG
8362 if (netif_running(dev)) {
8363 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8364 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8365 }
8366
34f80b04 8367 return rc;
a2fbb9ea
ET
8368}
8369
8370static void bnx2x_get_pauseparam(struct net_device *dev,
8371 struct ethtool_pauseparam *epause)
8372{
8373 struct bnx2x *bp = netdev_priv(dev);
8374
c0700f90 8375 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8376 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8377
c0700f90
DM
8378 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8379 BNX2X_FLOW_CTRL_RX);
8380 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8381 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8382
8383 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8384 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8385 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8386}
8387
8388static int bnx2x_set_pauseparam(struct net_device *dev,
8389 struct ethtool_pauseparam *epause)
8390{
8391 struct bnx2x *bp = netdev_priv(dev);
8392
34f80b04
EG
8393 if (IS_E1HMF(bp))
8394 return 0;
8395
a2fbb9ea
ET
8396 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8397 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8398 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8399
c0700f90 8400 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8401
f1410647 8402 if (epause->rx_pause)
c0700f90 8403 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8404
f1410647 8405 if (epause->tx_pause)
c0700f90 8406 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8407
c0700f90
DM
8408 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8409 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8410
c18487ee 8411 if (epause->autoneg) {
34f80b04 8412 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8413 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8414 return -EINVAL;
8415 }
a2fbb9ea 8416
c18487ee 8417 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8418 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8419 }
a2fbb9ea 8420
c18487ee
YR
8421 DP(NETIF_MSG_LINK,
8422 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8423
8424 if (netif_running(dev)) {
bb2a0f7a 8425 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8426 bnx2x_link_set(bp);
8427 }
a2fbb9ea
ET
8428
8429 return 0;
8430}
8431
df0f2343
VZ
8432static int bnx2x_set_flags(struct net_device *dev, u32 data)
8433{
8434 struct bnx2x *bp = netdev_priv(dev);
8435 int changed = 0;
8436 int rc = 0;
8437
8438 /* TPA requires Rx CSUM offloading */
8439 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8440 if (!(dev->features & NETIF_F_LRO)) {
8441 dev->features |= NETIF_F_LRO;
8442 bp->flags |= TPA_ENABLE_FLAG;
8443 changed = 1;
8444 }
8445
8446 } else if (dev->features & NETIF_F_LRO) {
8447 dev->features &= ~NETIF_F_LRO;
8448 bp->flags &= ~TPA_ENABLE_FLAG;
8449 changed = 1;
8450 }
8451
8452 if (changed && netif_running(dev)) {
8453 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8454 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8455 }
8456
8457 return rc;
8458}
8459
a2fbb9ea
ET
8460static u32 bnx2x_get_rx_csum(struct net_device *dev)
8461{
8462 struct bnx2x *bp = netdev_priv(dev);
8463
8464 return bp->rx_csum;
8465}
8466
8467static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8468{
8469 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8470 int rc = 0;
a2fbb9ea
ET
8471
8472 bp->rx_csum = data;
df0f2343
VZ
8473
 8474 /* Disable TPA when Rx CSUM is disabled; otherwise all
 8475 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8476 if (!data) {
8477 u32 flags = ethtool_op_get_flags(dev);
8478
8479 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8480 }
8481
8482 return rc;
a2fbb9ea
ET
8483}
8484
8485static int bnx2x_set_tso(struct net_device *dev, u32 data)
8486{
755735eb 8487 if (data) {
a2fbb9ea 8488 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8489 dev->features |= NETIF_F_TSO6;
8490 } else {
a2fbb9ea 8491 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8492 dev->features &= ~NETIF_F_TSO6;
8493 }
8494
a2fbb9ea
ET
8495 return 0;
8496}
8497
f3c87cdd 8498static const struct {
a2fbb9ea
ET
8499 char string[ETH_GSTRING_LEN];
8500} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8501 { "register_test (offline)" },
8502 { "memory_test (offline)" },
8503 { "loopback_test (offline)" },
8504 { "nvram_test (online)" },
8505 { "interrupt_test (online)" },
8506 { "link_test (online)" },
8507 { "idle check (online)" },
8508 { "MC errors (online)" }
a2fbb9ea
ET
8509};
8510
8511static int bnx2x_self_test_count(struct net_device *dev)
8512{
8513 return BNX2X_NUM_TESTS;
8514}
8515
f3c87cdd
YG
8516static int bnx2x_test_registers(struct bnx2x *bp)
8517{
8518 int idx, i, rc = -ENODEV;
8519 u32 wr_val = 0;
9dabc424 8520 int port = BP_PORT(bp);
f3c87cdd
YG
8521 static const struct {
8522 u32 offset0;
8523 u32 offset1;
8524 u32 mask;
8525 } reg_tbl[] = {
8526/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8527 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8528 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8529 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8530 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8531 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8532 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8533 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8534 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8535 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8536/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8537 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8538 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8539 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8540 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8541 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8542 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8543 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8544 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8545 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8546/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8547 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8548 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8549 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8550 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8551 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8552 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8553 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8554 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8555 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8556/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8557 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8558 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8559 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8560 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8561 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8562 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8563 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8564
8565 { 0xffffffff, 0, 0x00000000 }
8566 };
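	/* Each reg_tbl entry holds a port-0 register offset, the per-port
	 * stride (added below as port*offset1) and the mask of writable
	 * bits; the loop writes a pattern, reads it back through the mask
	 * and restores the saved value before comparing.
	 */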
8567
8568 if (!netif_running(bp->dev))
8569 return rc;
8570
 8571	/* Run the test twice:
 8572	   first by writing 0x00000000, then by writing 0xffffffff */
8573 for (idx = 0; idx < 2; idx++) {
8574
8575 switch (idx) {
8576 case 0:
8577 wr_val = 0;
8578 break;
8579 case 1:
8580 wr_val = 0xffffffff;
8581 break;
8582 }
8583
8584 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8585 u32 offset, mask, save_val, val;
8586
8587 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8588 mask = reg_tbl[i].mask;
8589
8590 save_val = REG_RD(bp, offset);
8591
8592 REG_WR(bp, offset, wr_val);
8593 val = REG_RD(bp, offset);
8594
8595 /* Restore the original register's value */
8596 REG_WR(bp, offset, save_val);
8597
 8598			/* verify that the value read back is as expected */
8599 if ((val & mask) != (wr_val & mask))
8600 goto test_reg_exit;
8601 }
8602 }
8603
8604 rc = 0;
8605
8606test_reg_exit:
8607 return rc;
8608}
8609
8610static int bnx2x_test_memory(struct bnx2x *bp)
8611{
8612 int i, j, rc = -ENODEV;
8613 u32 val;
8614 static const struct {
8615 u32 offset;
8616 int size;
8617 } mem_tbl[] = {
8618 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8619 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8620 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8621 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8622 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8623 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8624 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8625
8626 { 0xffffffff, 0 }
8627 };
8628 static const struct {
8629 char *name;
8630 u32 offset;
8631 u32 e1_mask;
8632 u32 e1h_mask;
f3c87cdd 8633 } prty_tbl[] = {
8634 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8635 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8636 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8637 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8638 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8639 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8640
8641 { NULL, 0xffffffff, 0, 0 }
8642 };
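	/* The walk below reads every word of each memory so that the
	 * parity logic is exercised, then checks the parity status
	 * registers; e1_mask/e1h_mask appear to hide bits that are
	 * expected to be set on the respective chip revision.
	 */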
8643
8644 if (!netif_running(bp->dev))
8645 return rc;
8646
8647 /* Go through all the memories */
8648 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8649 for (j = 0; j < mem_tbl[i].size; j++)
8650 REG_RD(bp, mem_tbl[i].offset + j*4);
8651
8652 /* Check the parity status */
8653 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8654 val = REG_RD(bp, prty_tbl[i].offset);
8655 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8656 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8657 DP(NETIF_MSG_HW,
8658 "%s is 0x%x\n", prty_tbl[i].name, val);
8659 goto test_mem_exit;
8660 }
8661 }
8662
8663 rc = 0;
8664
8665test_mem_exit:
8666 return rc;
8667}
8668
8669static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8670{
8671 int cnt = 1000;
8672
8673 if (link_up)
8674 while (bnx2x_link_test(bp) && cnt--)
8675 msleep(10);
8676}
8677
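/* Loopback test flow: put the link into BMAC or XGXS loopback, inject a
 * single 1514-byte frame on queue 0, then poll the Tx and Rx consumer
 * indices and verify the completion flags, packet length and payload
 * pattern of the received copy.
 */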
8678static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8679{
8680 unsigned int pkt_size, num_pkts, i;
8681 struct sk_buff *skb;
8682 unsigned char *packet;
8683 struct bnx2x_fastpath *fp = &bp->fp[0];
8684 u16 tx_start_idx, tx_idx;
8685 u16 rx_start_idx, rx_idx;
8686 u16 pkt_prod;
8687 struct sw_tx_bd *tx_buf;
8688 struct eth_tx_bd *tx_bd;
8689 dma_addr_t mapping;
8690 union eth_rx_cqe *cqe;
8691 u8 cqe_fp_flags;
8692 struct sw_rx_bd *rx_buf;
8693 u16 len;
8694 int rc = -ENODEV;
8695
8696 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8697 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8698 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8699 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8700 bnx2x_release_phy_lock(bp);
8701
8702 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8703 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8704 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8705 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8706 bnx2x_release_phy_lock(bp);
8707 /* wait until link state is restored */
8708 bnx2x_wait_for_link(bp, link_up);
8709
8710 } else
8711 return -EINVAL;
8712
8713 pkt_size = 1514;
8714 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8715 if (!skb) {
8716 rc = -ENOMEM;
8717 goto test_loopback_exit;
8718 }
8719 packet = skb_put(skb, pkt_size);
8720 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8721 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8722 for (i = ETH_HLEN; i < pkt_size; i++)
8723 packet[i] = (unsigned char) (i & 0xff);
8724
8725 num_pkts = 0;
8726 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8727 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8728
8729 pkt_prod = fp->tx_pkt_prod++;
8730 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8731 tx_buf->first_bd = fp->tx_bd_prod;
8732 tx_buf->skb = skb;
8733
8734 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8735 mapping = pci_map_single(bp->pdev, skb->data,
8736 skb_headlen(skb), PCI_DMA_TODEVICE);
8737 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8738 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8739 tx_bd->nbd = cpu_to_le16(1);
8740 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8741 tx_bd->vlan = cpu_to_le16(pkt_prod);
8742 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8743 ETH_TX_BD_FLAGS_END_BD);
8744 tx_bd->general_data = ((UNICAST_ADDRESS <<
8745 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8746
8747 wmb();
8748
8749 fp->hw_tx_prods->bds_prod =
8750 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8751 mb(); /* FW restriction: must not reorder writing nbd and packets */
8752 fp->hw_tx_prods->packets_prod =
8753 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8754 DOORBELL(bp, FP_IDX(fp), 0);
8755
8756 mmiowb();
8757
8758 num_pkts++;
8759 fp->tx_bd_prod++;
8760 bp->dev->trans_start = jiffies;
8761
8762 udelay(100);
8763
8764 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8765 if (tx_idx != tx_start_idx + num_pkts)
8766 goto test_loopback_exit;
8767
8768 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8769 if (rx_idx != rx_start_idx + num_pkts)
8770 goto test_loopback_exit;
8771
8772 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8773 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8774 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8775 goto test_loopback_rx_exit;
8776
8777 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8778 if (len != pkt_size)
8779 goto test_loopback_rx_exit;
8780
8781 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8782 skb = rx_buf->skb;
8783 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8784 for (i = ETH_HLEN; i < pkt_size; i++)
8785 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8786 goto test_loopback_rx_exit;
8787
8788 rc = 0;
8789
8790test_loopback_rx_exit:
8791
8792 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8793 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8794 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8795 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8796
8797 /* Update producers */
8798 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8799 fp->rx_sge_prod);
8800
8801test_loopback_exit:
8802 bp->link_params.loopback_mode = LOOPBACK_NONE;
8803
8804 return rc;
8805}
8806
8807static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8808{
8809 int rc = 0;
8810
8811 if (!netif_running(bp->dev))
8812 return BNX2X_LOOPBACK_FAILED;
8813
f8ef6e44 8814 bnx2x_netif_stop(bp, 1);
8815
8816 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8817 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8818 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8819 }
8820
8821 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8822 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8823 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8824 }
8825
8826 bnx2x_netif_start(bp);
8827
8828 return rc;
8829}
8830
8831#define CRC32_RESIDUAL 0xdebb20e3
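/* This appears to be the standard CRC32 residue: running the CRC over a
 * region together with its appended CRC yields this constant, so each
 * nvram region below can be validated without locating the stored CRC.
 */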
8832
8833static int bnx2x_test_nvram(struct bnx2x *bp)
8834{
8835 static const struct {
8836 int offset;
8837 int size;
8838 } nvram_tbl[] = {
8839 { 0, 0x14 }, /* bootstrap */
8840 { 0x14, 0xec }, /* dir */
8841 { 0x100, 0x350 }, /* manuf_info */
8842 { 0x450, 0xf0 }, /* feature_info */
8843 { 0x640, 0x64 }, /* upgrade_key_info */
8844 { 0x6a4, 0x64 },
8845 { 0x708, 0x70 }, /* manuf_key_info */
8846 { 0x778, 0x70 },
8847 { 0, 0 }
8848 };
8849 u32 buf[0x350 / 4];
8850 u8 *data = (u8 *)buf;
8851 int i, rc;
8852 u32 magic, csum;
8853
8854 rc = bnx2x_nvram_read(bp, 0, data, 4);
8855 if (rc) {
8856 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8857 goto test_nvram_exit;
8858 }
8859
8860 magic = be32_to_cpu(buf[0]);
8861 if (magic != 0x669955aa) {
8862 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8863 rc = -ENODEV;
8864 goto test_nvram_exit;
8865 }
8866
8867 for (i = 0; nvram_tbl[i].size; i++) {
8868
8869 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8870 nvram_tbl[i].size);
8871 if (rc) {
8872 DP(NETIF_MSG_PROBE,
8873 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8874 goto test_nvram_exit;
8875 }
8876
8877 csum = ether_crc_le(nvram_tbl[i].size, data);
8878 if (csum != CRC32_RESIDUAL) {
8879 DP(NETIF_MSG_PROBE,
8880 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8881 rc = -ENODEV;
8882 goto test_nvram_exit;
8883 }
8884 }
8885
8886test_nvram_exit:
8887 return rc;
8888}
8889
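/* Interrupt test: post a harmless, zero-length SET_MAC ramrod and wait up
 * to ~100ms for its completion to clear set_mac_pending, which proves the
 * slowpath interrupt path is alive.
 */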
8890static int bnx2x_test_intr(struct bnx2x *bp)
8891{
8892 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8893 int i, rc;
8894
8895 if (!netif_running(bp->dev))
8896 return -ENODEV;
8897
8898 config->hdr.length_6b = 0;
8899 config->hdr.offset = 0;
8900 config->hdr.client_id = BP_CL_ID(bp);
8901 config->hdr.reserved1 = 0;
8902
8903 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8904 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8905 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8906 if (rc == 0) {
8907 bp->set_mac_pending++;
8908 for (i = 0; i < 10; i++) {
8909 if (!bp->set_mac_pending)
8910 break;
8911 msleep_interruptible(10);
8912 }
8913 if (i == 10)
8914 rc = -ENODEV;
8915 }
8916
8917 return rc;
8918}
8919
8920static void bnx2x_self_test(struct net_device *dev,
8921 struct ethtool_test *etest, u64 *buf)
8922{
8923 struct bnx2x *bp = netdev_priv(dev);
8924
8925 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8926
f3c87cdd 8927 if (!netif_running(dev))
a2fbb9ea 8928 return;
a2fbb9ea 8929
33471629 8930 /* offline tests are not supported in MF mode */
8931 if (IS_E1HMF(bp))
8932 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8933
8934 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8935 u8 link_up;
8936
8937 link_up = bp->link_vars.link_up;
8938 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8939 bnx2x_nic_load(bp, LOAD_DIAG);
8940 /* wait until link state is restored */
8941 bnx2x_wait_for_link(bp, link_up);
8942
8943 if (bnx2x_test_registers(bp) != 0) {
8944 buf[0] = 1;
8945 etest->flags |= ETH_TEST_FL_FAILED;
8946 }
8947 if (bnx2x_test_memory(bp) != 0) {
8948 buf[1] = 1;
8949 etest->flags |= ETH_TEST_FL_FAILED;
8950 }
8951 buf[2] = bnx2x_test_loopback(bp, link_up);
8952 if (buf[2] != 0)
8953 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8954
8955 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8956 bnx2x_nic_load(bp, LOAD_NORMAL);
8957 /* wait until link state is restored */
8958 bnx2x_wait_for_link(bp, link_up);
8959 }
8960 if (bnx2x_test_nvram(bp) != 0) {
8961 buf[3] = 1;
8962 etest->flags |= ETH_TEST_FL_FAILED;
8963 }
8964 if (bnx2x_test_intr(bp) != 0) {
8965 buf[4] = 1;
8966 etest->flags |= ETH_TEST_FL_FAILED;
8967 }
8968 if (bp->port.pmf)
8969 if (bnx2x_link_test(bp) != 0) {
8970 buf[5] = 1;
8971 etest->flags |= ETH_TEST_FL_FAILED;
8972 }
8973 buf[7] = bnx2x_mc_assert(bp);
8974 if (buf[7] != 0)
8975 etest->flags |= ETH_TEST_FL_FAILED;
8976
8977#ifdef BNX2X_EXTRA_DEBUG
8978 bnx2x_panic_dump(bp);
8979#endif
8980}
8981
8982static const struct {
8983 long offset;
8984 int size;
8985 u32 flags;
8986#define STATS_FLAGS_PORT 1
8987#define STATS_FLAGS_FUNC 2
8988 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8989} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8990/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8991 8, STATS_FLAGS_FUNC, "rx_bytes" },
8992 { STATS_OFFSET32(error_bytes_received_hi),
8993 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8994 { STATS_OFFSET32(total_bytes_transmitted_hi),
8995 8, STATS_FLAGS_FUNC, "tx_bytes" },
8996 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8997 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8998 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8999 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 9000 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 9001 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 9002 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9003 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9004 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9005 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9006 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9007 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9008/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9009 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9010 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9011 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9012 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9013 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9014 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9015 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9016 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9017 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9018 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9019 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9020 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9021 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9022 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9023 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9024 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9025 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9026 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9027 8, STATS_FLAGS_PORT, "rx_fragments" },
9028/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9029 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9030 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9031 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9032 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9033 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9034 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9035 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9036 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9037 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9038 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9039 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9040 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9041 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9042 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9043 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9044 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9045 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9046 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9047 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9048/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9049 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9050 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9051 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9052 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9053 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9054 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9055 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9056 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9057 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9058 { STATS_OFFSET32(mac_filter_discard),
9059 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9060 { STATS_OFFSET32(no_buff_discard),
9061 4, STATS_FLAGS_FUNC, "rx_discards" },
9062 { STATS_OFFSET32(xxoverflow_discard),
9063 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9064 { STATS_OFFSET32(brb_drop_hi),
9065 8, STATS_FLAGS_PORT, "brb_discard" },
9066 { STATS_OFFSET32(brb_truncate_hi),
9067 8, STATS_FLAGS_PORT, "brb_truncate" },
9068/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9069 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9070 { STATS_OFFSET32(rx_skb_alloc_failed),
9071 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9072/* 42 */{ STATS_OFFSET32(hw_csum_err),
9073 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9074};
9075
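/* In E1H multi-function mode the port-wide MAC counters cannot be
 * attributed to a single function, so entries flagged STATS_FLAGS_PORT
 * are filtered out of the per-function ethtool view below.
 */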
9076#define IS_NOT_E1HMF_STAT(bp, i) \
9077 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9078
9079static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9080{
9081 struct bnx2x *bp = netdev_priv(dev);
9082 int i, j;
9083
9084 switch (stringset) {
9085 case ETH_SS_STATS:
bb2a0f7a 9086 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9087 if (IS_NOT_E1HMF_STAT(bp, i))
9088 continue;
9089 strcpy(buf + j*ETH_GSTRING_LEN,
9090 bnx2x_stats_arr[i].string);
9091 j++;
9092 }
9093 break;
9094
9095 case ETH_SS_TEST:
9096 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9097 break;
9098 }
9099}
9100
9101static int bnx2x_get_stats_count(struct net_device *dev)
9102{
9103 struct bnx2x *bp = netdev_priv(dev);
9104 int i, num_stats = 0;
9105
9106 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9107 if (IS_NOT_E1HMF_STAT(bp, i))
9108 continue;
9109 num_stats++;
9110 }
9111 return num_stats;
9112}
9113
9114static void bnx2x_get_ethtool_stats(struct net_device *dev,
9115 struct ethtool_stats *stats, u64 *buf)
9116{
9117 struct bnx2x *bp = netdev_priv(dev);
9118 u32 *hw_stats = (u32 *)&bp->eth_stats;
9119 int i, j;
a2fbb9ea 9120
bb2a0f7a 9121 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9122 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9123 continue;
9124
9125 if (bnx2x_stats_arr[i].size == 0) {
9126 /* skip this counter */
9127 buf[j] = 0;
9128 j++;
9129 continue;
9130 }
bb2a0f7a 9131 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9132 /* 4-byte counter */
9133 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9134 j++;
9135 continue;
9136 }
9137 /* 8-byte counter */
9138 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9139 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9140 j++;
9141 }
9142}
9143
9144static int bnx2x_phys_id(struct net_device *dev, u32 data)
9145{
9146 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9147 int port = BP_PORT(bp);
9148 int i;
9149
9150 if (!netif_running(dev))
9151 return 0;
9152
9153 if (!bp->port.pmf)
9154 return 0;
9155
9156 if (data == 0)
9157 data = 2;
9158
9159 for (i = 0; i < (data * 2); i++) {
c18487ee 9160 if ((i % 2) == 0)
34f80b04 9161 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9162 bp->link_params.hw_led_mode,
9163 bp->link_params.chip_id);
9164 else
34f80b04 9165 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9166 bp->link_params.hw_led_mode,
9167 bp->link_params.chip_id);
9168
9169 msleep_interruptible(500);
9170 if (signal_pending(current))
9171 break;
9172 }
9173
c18487ee 9174 if (bp->link_vars.link_up)
34f80b04 9175 bnx2x_set_led(bp, port, LED_MODE_OPER,
9176 bp->link_vars.line_speed,
9177 bp->link_params.hw_led_mode,
9178 bp->link_params.chip_id);
9179
9180 return 0;
9181}
9182
9183static struct ethtool_ops bnx2x_ethtool_ops = {
9184 .get_settings = bnx2x_get_settings,
9185 .set_settings = bnx2x_set_settings,
9186 .get_drvinfo = bnx2x_get_drvinfo,
9187 .get_wol = bnx2x_get_wol,
9188 .set_wol = bnx2x_set_wol,
9189 .get_msglevel = bnx2x_get_msglevel,
9190 .set_msglevel = bnx2x_set_msglevel,
9191 .nway_reset = bnx2x_nway_reset,
9192 .get_link = ethtool_op_get_link,
9193 .get_eeprom_len = bnx2x_get_eeprom_len,
9194 .get_eeprom = bnx2x_get_eeprom,
9195 .set_eeprom = bnx2x_set_eeprom,
9196 .get_coalesce = bnx2x_get_coalesce,
9197 .set_coalesce = bnx2x_set_coalesce,
9198 .get_ringparam = bnx2x_get_ringparam,
9199 .set_ringparam = bnx2x_set_ringparam,
9200 .get_pauseparam = bnx2x_get_pauseparam,
9201 .set_pauseparam = bnx2x_set_pauseparam,
9202 .get_rx_csum = bnx2x_get_rx_csum,
9203 .set_rx_csum = bnx2x_set_rx_csum,
9204 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9205 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9206 .set_flags = bnx2x_set_flags,
9207 .get_flags = ethtool_op_get_flags,
9208 .get_sg = ethtool_op_get_sg,
9209 .set_sg = ethtool_op_set_sg,
9210 .get_tso = ethtool_op_get_tso,
9211 .set_tso = bnx2x_set_tso,
9212 .self_test_count = bnx2x_self_test_count,
9213 .self_test = bnx2x_self_test,
9214 .get_strings = bnx2x_get_strings,
9215 .phys_id = bnx2x_phys_id,
9216 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9217 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9218};
9219
9220/* end of ethtool_ops */
9221
9222/****************************************************************************
9223* General service functions
9224****************************************************************************/
9225
9226static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9227{
9228 u16 pmcsr;
9229
9230 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9231
9232 switch (state) {
9233 case PCI_D0:
34f80b04 9234 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9235 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9236 PCI_PM_CTRL_PME_STATUS));
9237
9238 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9239 /* delay required during transition out of D3hot */
a2fbb9ea 9240 msleep(20);
34f80b04 9241 break;
a2fbb9ea 9242
9243 case PCI_D3hot:
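		/* PMCSR bits [1:0] encode the power state; 3 selects D3hot */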
9244 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9245 pmcsr |= 3;
a2fbb9ea 9246
9247 if (bp->wol)
9248 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9249
9250 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9251 pmcsr);
a2fbb9ea 9252
9253 /* No more memory access after this point until
9254 * device is brought back to D0.
9255 */
9256 break;
9257
9258 default:
9259 return -EINVAL;
9260 }
9261 return 0;
9262}
9263
9264/*
9265 * net_device service functions
9266 */
9267
9268static int bnx2x_poll(struct napi_struct *napi, int budget)
9269{
9270 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9271 napi);
9272 struct bnx2x *bp = fp->bp;
9273 int work_done = 0;
2772f903 9274 u16 rx_cons_sb;
9275
9276#ifdef BNX2X_STOP_ON_ERROR
9277 if (unlikely(bp->panic))
34f80b04 9278 goto poll_panic;
9279#endif
9280
9281 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9282 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9283 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9284
9285 bnx2x_update_fpsb_idx(fp);
9286
da5a662a 9287 if (BNX2X_HAS_TX_WORK(fp))
9288 bnx2x_tx_int(fp, budget);
9289
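	/* If the consumer index lands on the last entry of a completion
	 * page, step over it; that slot presumably holds the next-page
	 * pointer rather than a real CQE.
	 */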
9290 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9291 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9292 rx_cons_sb++;
da5a662a 9293 if (BNX2X_HAS_RX_WORK(fp))
9294 work_done = bnx2x_rx_int(fp, budget);
9295
da5a662a 9296 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9297 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9298 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9299 rx_cons_sb++;
9300
9301 /* must not complete if we consumed full budget */
da5a662a 9302 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9303
9304#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9305poll_panic:
a2fbb9ea 9306#endif
908a7a16 9307 netif_rx_complete(napi);
a2fbb9ea 9308
34f80b04 9309 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9310 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9311 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9312 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9313 }
9314 return work_done;
9315}
9316
 9317
 9318/* We split the first BD into a headers BD and a data BD
33471629 9319 * to ease the pain of our fellow microcode engineers;
 9320 * we use one DMA mapping for both BDs.
 9321 * So far this has only been observed to happen
 9322 * in Other Operating Systems(TM).
 9323 */
9324static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9325 struct bnx2x_fastpath *fp,
9326 struct eth_tx_bd **tx_bd, u16 hlen,
9327 u16 bd_prod, int nbd)
9328{
9329 struct eth_tx_bd *h_tx_bd = *tx_bd;
9330 struct eth_tx_bd *d_tx_bd;
9331 dma_addr_t mapping;
9332 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9333
9334 /* first fix first BD */
9335 h_tx_bd->nbd = cpu_to_le16(nbd);
9336 h_tx_bd->nbytes = cpu_to_le16(hlen);
9337
9338 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9339 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9340 h_tx_bd->addr_lo, h_tx_bd->nbd);
9341
9342 /* now get a new data BD
9343 * (after the pbd) and fill it */
9344 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9345 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9346
9347 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9348 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9349
9350 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9351 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9352 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9353 d_tx_bd->vlan = 0;
9354 /* this marks the BD as one that has no individual mapping
9355 * the FW ignores this flag in a BD not marked start
9356 */
9357 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9358 DP(NETIF_MSG_TX_QUEUED,
9359 "TSO split data size is %d (%x:%x)\n",
9360 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9361
9362 /* update tx_bd for marking the last BD flag */
9363 *tx_bd = d_tx_bd;
9364
9365 return bd_prod;
9366}
9367
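/* bnx2x_csum_fix() appears to compensate for a checksum that was computed
 * starting 'fix' bytes away from the real transport header: fold those
 * bytes out of (or into) the partial checksum, then return it byte-swapped
 * for the parsing BD.
 */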
9368static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9369{
9370 if (fix > 0)
9371 csum = (u16) ~csum_fold(csum_sub(csum,
9372 csum_partial(t_header - fix, fix, 0)));
9373
9374 else if (fix < 0)
9375 csum = (u16) ~csum_fold(csum_add(csum,
9376 csum_partial(t_header, -fix, 0)));
9377
9378 return swab16(csum);
9379}
9380
9381static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9382{
9383 u32 rc;
9384
9385 if (skb->ip_summed != CHECKSUM_PARTIAL)
9386 rc = XMIT_PLAIN;
9387
9388 else {
9389 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9390 rc = XMIT_CSUM_V6;
9391 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9392 rc |= XMIT_CSUM_TCP;
9393
9394 } else {
9395 rc = XMIT_CSUM_V4;
9396 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9397 rc |= XMIT_CSUM_TCP;
9398 }
9399 }
9400
9401 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9402 rc |= XMIT_GSO_V4;
9403
9404 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9405 rc |= XMIT_GSO_V6;
9406
9407 return rc;
9408}
9409
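/* The window check below rests on an assumed FW limit: at most
 * MAX_FETCH_BD BDs may be fetched per packet, so for LSO every window of
 * (MAX_FETCH_BD - 3) consecutive frags must cover at least one MSS;
 * otherwise a produced segment could span too many BDs and the skb is
 * linearized instead.
 */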
9410/* check if packet requires linearization (packet is too fragmented) */
9411static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9412 u32 xmit_type)
9413{
9414 int to_copy = 0;
9415 int hlen = 0;
9416 int first_bd_sz = 0;
9417
9418 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9419 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9420
9421 if (xmit_type & XMIT_GSO) {
9422 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9423 /* Check if LSO packet needs to be copied:
9424 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9425 int wnd_size = MAX_FETCH_BD - 3;
33471629 9426 /* Number of windows to check */
9427 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9428 int wnd_idx = 0;
9429 int frag_idx = 0;
9430 u32 wnd_sum = 0;
9431
9432 /* Headers length */
9433 hlen = (int)(skb_transport_header(skb) - skb->data) +
9434 tcp_hdrlen(skb);
9435
9436 /* Amount of data (w/o headers) on linear part of SKB*/
9437 first_bd_sz = skb_headlen(skb) - hlen;
9438
9439 wnd_sum = first_bd_sz;
9440
9441 /* Calculate the first sum - it's special */
9442 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9443 wnd_sum +=
9444 skb_shinfo(skb)->frags[frag_idx].size;
9445
9446 /* If there was data on linear skb data - check it */
9447 if (first_bd_sz > 0) {
9448 if (unlikely(wnd_sum < lso_mss)) {
9449 to_copy = 1;
9450 goto exit_lbl;
9451 }
9452
9453 wnd_sum -= first_bd_sz;
9454 }
9455
9456 /* Others are easier: run through the frag list and
9457 check all windows */
9458 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9459 wnd_sum +=
9460 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9461
9462 if (unlikely(wnd_sum < lso_mss)) {
9463 to_copy = 1;
9464 break;
9465 }
9466 wnd_sum -=
9467 skb_shinfo(skb)->frags[wnd_idx].size;
9468 }
9469
9470 } else {
 9471			/* a non-LSO packet this fragmented must always
 9472			   be linearized */
9473 to_copy = 1;
9474 }
9475 }
9476
9477exit_lbl:
9478 if (unlikely(to_copy))
9479 DP(NETIF_MSG_TX_QUEUED,
9480 "Linearization IS REQUIRED for %s packet. "
9481 "num_frags %d hlen %d first_bd_sz %d\n",
9482 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9483 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9484
9485 return to_copy;
9486}
9487
9488/* called with netif_tx_lock
a2fbb9ea 9489 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9490 * netif_wake_queue()
9491 */
9492static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9493{
9494 struct bnx2x *bp = netdev_priv(dev);
9495 struct bnx2x_fastpath *fp;
9496 struct sw_tx_bd *tx_buf;
9497 struct eth_tx_bd *tx_bd;
9498 struct eth_tx_parse_bd *pbd = NULL;
9499 u16 pkt_prod, bd_prod;
755735eb 9500 int nbd, fp_index;
a2fbb9ea 9501 dma_addr_t mapping;
9502 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9503 int vlan_off = (bp->e1hov ? 4 : 0);
9504 int i;
9505 u8 hlen = 0;
9506
9507#ifdef BNX2X_STOP_ON_ERROR
9508 if (unlikely(bp->panic))
9509 return NETDEV_TX_BUSY;
9510#endif
9511
755735eb 9512 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9513 fp = &bp->fp[fp_index];
755735eb 9514
231fd58a 9515 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9516 bp->eth_stats.driver_xoff++;
 9517 netif_stop_queue(dev);
9518 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9519 return NETDEV_TX_BUSY;
9520 }
9521
9522 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9523 " gso type %x xmit_type %x\n",
9524 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9525 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9526
33471629 9527 /* First, check if we need to linearize the skb
9528 (due to FW restrictions) */
9529 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9530 /* Statistics of linearization */
9531 bp->lin_cnt++;
9532 if (skb_linearize(skb) != 0) {
9533 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9534 "silently dropping this SKB\n");
9535 dev_kfree_skb_any(skb);
da5a662a 9536 return NETDEV_TX_OK;
9537 }
9538 }
9539
a2fbb9ea 9540 /*
755735eb 9541 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9542 then for TSO or xsum we have a parsing-info BD,
755735eb 9543 and only then we have the rest of the TSO BDs.
 9544 (don't forget to mark the last one as last,
 9545 and to unmap only AFTER you write to the BD ...)
755735eb 9546 And above all, all pbd sizes are in words - NOT DWORDS!
 9547 */
9548
9549 pkt_prod = fp->tx_pkt_prod++;
755735eb 9550 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9551
755735eb 9552 /* get a tx_buf and first BD */
9553 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9554 tx_bd = &fp->tx_desc_ring[bd_prod];
9555
9556 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9557 tx_bd->general_data = (UNICAST_ADDRESS <<
9558 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9559 /* header nbd */
9560 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9561
9562 /* remember the first BD of the packet */
9563 tx_buf->first_bd = fp->tx_bd_prod;
9564 tx_buf->skb = skb;
9565
9566 DP(NETIF_MSG_TX_QUEUED,
9567 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9568 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9569
9570 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9571 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9572 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9573 vlan_off += 4;
9574 } else
9575 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9576
755735eb 9577 if (xmit_type) {
755735eb 9578 /* turn on parsing and get a BD */
9579 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9580 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9581
9582 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9583 }
9584
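	/* Lengths written into the parsing BD are in 16-bit words, hence
	 * the /2 below; vlan_off appears to account for the 4-byte tag
	 * that is part of the frame on the wire but not of skb->data.
	 */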
9585 if (xmit_type & XMIT_CSUM) {
9586 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9587
9588 /* for now NS flag is not used in Linux */
755735eb 9589 pbd->global_data = (hlen |
96fc1784 9590 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9591 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9592
9593 pbd->ip_hlen = (skb_transport_header(skb) -
9594 skb_network_header(skb)) / 2;
9595
9596 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9597
9598 pbd->total_hlen = cpu_to_le16(hlen);
9599 hlen = hlen*2 - vlan_off;
a2fbb9ea 9600
9601 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9602
9603 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9604 tx_bd->bd_flags.as_bitfield |=
9605 ETH_TX_BD_FLAGS_IP_CSUM;
9606 else
9607 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9608
9609 if (xmit_type & XMIT_CSUM_TCP) {
9610 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9611
9612 } else {
9613 s8 fix = SKB_CS_OFF(skb); /* signed! */
9614
a2fbb9ea 9615 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9616 pbd->cs_offset = fix / 2;
a2fbb9ea 9617
9618 DP(NETIF_MSG_TX_QUEUED,
9619 "hlen %d offset %d fix %d csum before fix %x\n",
9620 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9621 SKB_CS(skb));
9622
9623 /* HW bug: fixup the CSUM */
9624 pbd->tcp_pseudo_csum =
9625 bnx2x_csum_fix(skb_transport_header(skb),
9626 SKB_CS(skb), fix);
9627
9628 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9629 pbd->tcp_pseudo_csum);
9630 }
9631 }
9632
9633 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9634 skb_headlen(skb), PCI_DMA_TODEVICE);
9635
9636 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9637 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9638 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9639 tx_bd->nbd = cpu_to_le16(nbd);
9640 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9641
9642 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9643 " nbytes %d flags %x vlan %x\n",
9644 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9645 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9646 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9647
755735eb 9648 if (xmit_type & XMIT_GSO) {
9649
9650 DP(NETIF_MSG_TX_QUEUED,
9651 "TSO packet len %d hlen %d total len %d tso size %d\n",
9652 skb->len, hlen, skb_headlen(skb),
9653 skb_shinfo(skb)->gso_size);
9654
9655 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9656
9657 if (unlikely(skb_headlen(skb) > hlen))
9658 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9659 bd_prod, ++nbd);
9660
9661 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9662 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9663 pbd->tcp_flags = pbd_tcp_flags(skb);
9664
9665 if (xmit_type & XMIT_GSO_V4) {
9666 pbd->ip_id = swab16(ip_hdr(skb)->id);
9667 pbd->tcp_pseudo_csum =
9668 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9669 ip_hdr(skb)->daddr,
9670 0, IPPROTO_TCP, 0));
9671
9672 } else
9673 pbd->tcp_pseudo_csum =
9674 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9675 &ipv6_hdr(skb)->daddr,
9676 0, IPPROTO_TCP, 0));
9677
9678 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9679 }
9680
9681 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9682 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9683
9684 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9685 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9686
9687 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9688 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9689
9690 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9691 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9692 tx_bd->nbytes = cpu_to_le16(frag->size);
9693 tx_bd->vlan = cpu_to_le16(pkt_prod);
9694 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9695
9696 DP(NETIF_MSG_TX_QUEUED,
9697 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9698 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9699 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9700 }
9701
755735eb 9702 /* now at last mark the BD as the last BD */
9703 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9704
9705 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9706 tx_bd, tx_bd->bd_flags.as_bitfield);
9707
9708 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9709
755735eb 9710 /* now send a tx doorbell, counting the next BD
9711 * if the packet contains or ends with it
9712 */
9713 if (TX_BD_POFF(bd_prod) < nbd)
9714 nbd++;
9715
9716 if (pbd)
9717 DP(NETIF_MSG_TX_QUEUED,
9718 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9719 " tcp_flags %x xsum %x seq %u hlen %u\n",
9720 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9721 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9722 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9723
755735eb 9724 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9725
9726 /*
9727 * Make sure that the BD data is updated before updating the producer
9728 * since FW might read the BD right after the producer is updated.
9729 * This is only applicable for weak-ordered memory model archs such
 9730	 * as IA-64. The following barrier is also mandatory since the FW
 9731	 * assumes packets must have BDs.
9732 */
9733 wmb();
9734
9735 fp->hw_tx_prods->bds_prod =
9736 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9737 mb(); /* FW restriction: must not reorder writing nbd and packets */
9738 fp->hw_tx_prods->packets_prod =
9739 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9740 DOORBELL(bp, FP_IDX(fp), 0);
9741
9742 mmiowb();
9743
755735eb 9744 fp->tx_bd_prod += nbd;
9745 dev->trans_start = jiffies;
9746
9747 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9748 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9749 if we put Tx into XOFF state. */
9750 smp_mb();
a2fbb9ea 9751 netif_stop_queue(dev);
bb2a0f7a 9752 bp->eth_stats.driver_xoff++;
9753 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9754 netif_wake_queue(dev);
9755 }
9756 fp->tx_pkt++;
9757
9758 return NETDEV_TX_OK;
9759}
9760
bb2a0f7a 9761/* called with rtnl_lock */
9762static int bnx2x_open(struct net_device *dev)
9763{
9764 struct bnx2x *bp = netdev_priv(dev);
9765
9766 bnx2x_set_power_state(bp, PCI_D0);
9767
bb2a0f7a 9768 return bnx2x_nic_load(bp, LOAD_OPEN);
9769}
9770
bb2a0f7a 9771/* called with rtnl_lock */
9772static int bnx2x_close(struct net_device *dev)
9773{
9774 struct bnx2x *bp = netdev_priv(dev);
9775
9776 /* Unload the driver, release IRQs */
9777 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9778 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9779 if (!CHIP_REV_IS_SLOW(bp))
9780 bnx2x_set_power_state(bp, PCI_D3hot);
9781
9782 return 0;
9783}
9784
9785/* called with netif_tx_lock from set_multicast */
9786static void bnx2x_set_rx_mode(struct net_device *dev)
9787{
9788 struct bnx2x *bp = netdev_priv(dev);
9789 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9790 int port = BP_PORT(bp);
9791
9792 if (bp->state != BNX2X_STATE_OPEN) {
9793 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9794 return;
9795 }
9796
9797 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9798
9799 if (dev->flags & IFF_PROMISC)
9800 rx_mode = BNX2X_RX_MODE_PROMISC;
9801
9802 else if ((dev->flags & IFF_ALLMULTI) ||
9803 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9804 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9805
9806 else { /* some multicasts */
9807 if (CHIP_IS_E1(bp)) {
9808 int i, old, offset;
9809 struct dev_mc_list *mclist;
9810 struct mac_configuration_cmd *config =
9811 bnx2x_sp(bp, mcast_config);
9812
9813 for (i = 0, mclist = dev->mc_list;
9814 mclist && (i < dev->mc_count);
9815 i++, mclist = mclist->next) {
9816
9817 config->config_table[i].
9818 cam_entry.msb_mac_addr =
9819 swab16(*(u16 *)&mclist->dmi_addr[0]);
9820 config->config_table[i].
9821 cam_entry.middle_mac_addr =
9822 swab16(*(u16 *)&mclist->dmi_addr[2]);
9823 config->config_table[i].
9824 cam_entry.lsb_mac_addr =
9825 swab16(*(u16 *)&mclist->dmi_addr[4]);
9826 config->config_table[i].cam_entry.flags =
9827 cpu_to_le16(port);
9828 config->config_table[i].
9829 target_table_entry.flags = 0;
9830 config->config_table[i].
9831 target_table_entry.client_id = 0;
9832 config->config_table[i].
9833 target_table_entry.vlan_id = 0;
9834
9835 DP(NETIF_MSG_IFUP,
9836 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9837 config->config_table[i].
9838 cam_entry.msb_mac_addr,
9839 config->config_table[i].
9840 cam_entry.middle_mac_addr,
9841 config->config_table[i].
9842 cam_entry.lsb_mac_addr);
9843 }
9844 old = config->hdr.length_6b;
9845 if (old > i) {
9846 for (; i < old; i++) {
9847 if (CAM_IS_INVALID(config->
9848 config_table[i])) {
9849 i--; /* already invalidated */
9850 break;
9851 }
9852 /* invalidate */
9853 CAM_INVALIDATE(config->
9854 config_table[i]);
9855 }
9856 }
9857
9858 if (CHIP_REV_IS_SLOW(bp))
9859 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9860 else
9861 offset = BNX2X_MAX_MULTICAST*(1 + port);
9862
9863 config->hdr.length_6b = i;
9864 config->hdr.offset = offset;
9865 config->hdr.client_id = BP_CL_ID(bp);
9866 config->hdr.reserved1 = 0;
9867
9868 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9869 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9870 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9871 0);
9872 } else { /* E1H */
9873 /* Accept one or more multicasts */
9874 struct dev_mc_list *mclist;
9875 u32 mc_filter[MC_HASH_SIZE];
9876 u32 crc, bit, regidx;
9877 int i;
9878
9879 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9880
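			/* Approximate multicast filter: the top 8 bits of
			 * each address's crc32c select one of 256 bits
			 * spread across the MC_HASH_SIZE 32-bit registers.
			 */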
9881 for (i = 0, mclist = dev->mc_list;
9882 mclist && (i < dev->mc_count);
9883 i++, mclist = mclist->next) {
9884
9885 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9886 mclist->dmi_addr);
9887
9888 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9889 bit = (crc >> 24) & 0xff;
9890 regidx = bit >> 5;
9891 bit &= 0x1f;
9892 mc_filter[regidx] |= (1 << bit);
9893 }
9894
9895 for (i = 0; i < MC_HASH_SIZE; i++)
9896 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9897 mc_filter[i]);
9898 }
9899 }
9900
9901 bp->rx_mode = rx_mode;
9902 bnx2x_set_storm_rx_mode(bp);
9903}
9904
9905/* called with rtnl_lock */
9906static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9907{
9908 struct sockaddr *addr = p;
9909 struct bnx2x *bp = netdev_priv(dev);
9910
34f80b04 9911 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9912 return -EINVAL;
9913
9914 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9915 if (netif_running(dev)) {
9916 if (CHIP_IS_E1(bp))
3101c2bc 9917 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9918 else
3101c2bc 9919 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9920 }
9921
9922 return 0;
9923}
9924
c18487ee 9925/* called with rtnl_lock */
9926static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9927{
9928 struct mii_ioctl_data *data = if_mii(ifr);
9929 struct bnx2x *bp = netdev_priv(dev);
3196a88a 9930 int port = BP_PORT(bp);
9931 int err;
9932
9933 switch (cmd) {
9934 case SIOCGMIIPHY:
34f80b04 9935 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9936
c14423fe 9937 /* fallthrough */
c18487ee 9938
a2fbb9ea 9939 case SIOCGMIIREG: {
c18487ee 9940 u16 mii_regval;
a2fbb9ea 9941
9942 if (!netif_running(dev))
9943 return -EAGAIN;
a2fbb9ea 9944
34f80b04 9945 mutex_lock(&bp->port.phy_mutex);
3196a88a 9946 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9947 DEFAULT_PHY_DEV_ADDR,
9948 (data->reg_num & 0x1f), &mii_regval);
9949 data->val_out = mii_regval;
34f80b04 9950 mutex_unlock(&bp->port.phy_mutex);
9951 return err;
9952 }
9953
9954 case SIOCSMIIREG:
9955 if (!capable(CAP_NET_ADMIN))
9956 return -EPERM;
9957
9958 if (!netif_running(dev))
9959 return -EAGAIN;
9960
34f80b04 9961 mutex_lock(&bp->port.phy_mutex);
3196a88a 9962 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9963 DEFAULT_PHY_DEV_ADDR,
9964 (data->reg_num & 0x1f), data->val_in);
34f80b04 9965 mutex_unlock(&bp->port.phy_mutex);
9966 return err;
9967
9968 default:
9969 /* do nothing */
9970 break;
9971 }
9972
9973 return -EOPNOTSUPP;
9974}
9975
34f80b04 9976/* called with rtnl_lock */
9977static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9978{
9979 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9980 int rc = 0;
9981
9982 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9983 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9984 return -EINVAL;
9985
9986 /* This does not race with packet allocation
c14423fe 9987 * because the actual alloc size is
9988 * only updated as part of load
9989 */
9990 dev->mtu = new_mtu;
9991
9992 if (netif_running(dev)) {
9993 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9994 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9995 }
9996
9997 return rc;
9998}
9999
10000static void bnx2x_tx_timeout(struct net_device *dev)
10001{
10002 struct bnx2x *bp = netdev_priv(dev);
10003
10004#ifdef BNX2X_STOP_ON_ERROR
10005 if (!bp->panic)
10006 bnx2x_panic();
10007#endif
 10008	/* This allows the netif to be shut down gracefully before resetting */
10009 schedule_work(&bp->reset_task);
10010}
10011
10012#ifdef BCM_VLAN
34f80b04 10013/* called with rtnl_lock */
10014static void bnx2x_vlan_rx_register(struct net_device *dev,
10015 struct vlan_group *vlgrp)
10016{
10017 struct bnx2x *bp = netdev_priv(dev);
10018
10019 bp->vlgrp = vlgrp;
10020 if (netif_running(dev))
49d66772 10021 bnx2x_set_client_config(bp);
a2fbb9ea 10022}
34f80b04 10023
10024#endif
10025
10026#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10027static void poll_bnx2x(struct net_device *dev)
10028{
10029 struct bnx2x *bp = netdev_priv(dev);
10030
10031 disable_irq(bp->pdev->irq);
10032 bnx2x_interrupt(bp->pdev->irq, dev);
10033 enable_irq(bp->pdev->irq);
10034}
10035#endif
10036
10037static const struct net_device_ops bnx2x_netdev_ops = {
10038 .ndo_open = bnx2x_open,
10039 .ndo_stop = bnx2x_close,
10040 .ndo_start_xmit = bnx2x_start_xmit,
10041 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10042 .ndo_set_mac_address = bnx2x_change_mac_addr,
10043 .ndo_validate_addr = eth_validate_addr,
10044 .ndo_do_ioctl = bnx2x_ioctl,
10045 .ndo_change_mtu = bnx2x_change_mtu,
10046 .ndo_tx_timeout = bnx2x_tx_timeout,
10047#ifdef BCM_VLAN
10048 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10049#endif
10050#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10051 .ndo_poll_controller = poll_bnx2x,
10052#endif
10053};
10054
10055
10056static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10057 struct net_device *dev)
10058{
10059 struct bnx2x *bp;
10060 int rc;
10061
10062 SET_NETDEV_DEV(dev, &pdev->dev);
10063 bp = netdev_priv(dev);
10064
10065 bp->dev = dev;
10066 bp->pdev = pdev;
a2fbb9ea 10067 bp->flags = 0;
34f80b04 10068 bp->func = PCI_FUNC(pdev->devfn);
10069
10070 rc = pci_enable_device(pdev);
10071 if (rc) {
10072 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10073 goto err_out;
10074 }
10075
10076 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10077 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10078 " aborting\n");
10079 rc = -ENODEV;
10080 goto err_out_disable;
10081 }
10082
10083 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10084 printk(KERN_ERR PFX "Cannot find second PCI device"
10085 " base address, aborting\n");
10086 rc = -ENODEV;
10087 goto err_out_disable;
10088 }
10089
10090 if (atomic_read(&pdev->enable_cnt) == 1) {
10091 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10092 if (rc) {
10093 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10094 " aborting\n");
10095 goto err_out_disable;
10096 }
a2fbb9ea 10097
10098 pci_set_master(pdev);
10099 pci_save_state(pdev);
10100 }
10101
10102 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10103 if (bp->pm_cap == 0) {
10104 printk(KERN_ERR PFX "Cannot find power management"
10105 " capability, aborting\n");
10106 rc = -EIO;
10107 goto err_out_release;
10108 }
10109
10110 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10111 if (bp->pcie_cap == 0) {
10112 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10113 " aborting\n");
10114 rc = -EIO;
10115 goto err_out_release;
10116 }
10117
10118 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10119 bp->flags |= USING_DAC_FLAG;
10120 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10121 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10122 " failed, aborting\n");
10123 rc = -EIO;
10124 goto err_out_release;
10125 }
10126
10127 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10128 printk(KERN_ERR PFX "System does not support DMA,"
10129 " aborting\n");
10130 rc = -EIO;
10131 goto err_out_release;
10132 }
10133
10134 dev->mem_start = pci_resource_start(pdev, 0);
10135 dev->base_addr = dev->mem_start;
10136 dev->mem_end = pci_resource_end(pdev, 0);
10137
10138 dev->irq = pdev->irq;
10139
275f165f 10140 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10141 if (!bp->regview) {
10142 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10143 rc = -ENOMEM;
10144 goto err_out_release;
10145 }
10146
10147 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10148 min_t(u64, BNX2X_DB_SIZE,
10149 pci_resource_len(pdev, 2)));
10150 if (!bp->doorbells) {
10151 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10152 rc = -ENOMEM;
10153 goto err_out_unmap;
10154 }
10155
10156 bnx2x_set_power_state(bp, PCI_D0);
10157
10158 /* clean indirect addresses */
10159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10160 PCICFG_VENDOR_ID_OFFSET);
10161 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10162 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10163 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10164 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10165
34f80b04 10166 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10167
c64213cd 10168 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10169 dev->ethtool_ops = &bnx2x_ethtool_ops;
10170 dev->features |= NETIF_F_SG;
10171 dev->features |= NETIF_F_HW_CSUM;
10172 if (bp->flags & USING_DAC_FLAG)
10173 dev->features |= NETIF_F_HIGHDMA;
10174#ifdef BCM_VLAN
10175 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10176#endif
10177 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10178 dev->features |= NETIF_F_TSO6;
10179
10180 return 0;
10181
10182err_out_unmap:
10183 if (bp->regview) {
10184 iounmap(bp->regview);
10185 bp->regview = NULL;
10186 }
10187 if (bp->doorbells) {
10188 iounmap(bp->doorbells);
10189 bp->doorbells = NULL;
10190 }
10191
10192err_out_release:
10193 if (atomic_read(&pdev->enable_cnt) == 1)
10194 pci_release_regions(pdev);
10195
10196err_out_disable:
10197 pci_disable_device(pdev);
10198 pci_set_drvdata(pdev, NULL);
10199
10200err_out:
10201 return rc;
10202}
10203
10204static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10205{
10206 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10207
10208 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10209 return val;
10210}
10211
10212/* return value of 1=2.5GHz 2=5GHz */
10213static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10214{
10215 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10216
10217 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10218 return val;
10219}
10220
10221static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10222 const struct pci_device_id *ent)
10223{
10224 static int version_printed;
10225 struct net_device *dev = NULL;
10226 struct bnx2x *bp;
25047950 10227 int rc;
10228
10229 if (version_printed++ == 0)
10230 printk(KERN_INFO "%s", version);
10231
10232 /* dev zeroed in init_etherdev */
10233 dev = alloc_etherdev(sizeof(*bp));
10234 if (!dev) {
10235 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10236 return -ENOMEM;
34f80b04 10237 }
a2fbb9ea 10238
10239 bp = netdev_priv(dev);
10240 bp->msglevel = debug;
10241
34f80b04 10242 rc = bnx2x_init_dev(pdev, dev);
10243 if (rc < 0) {
10244 free_netdev(dev);
10245 return rc;
10246 }
10247
10248 rc = register_netdev(dev);
10249 if (rc) {
c14423fe 10250 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10251 goto init_one_exit;
10252 }
10253
10254 pci_set_drvdata(pdev, dev);
10255
10256 rc = bnx2x_init_bp(bp);
10257 if (rc) {
10258 unregister_netdev(dev);
10259 goto init_one_exit;
10260 }
10261
10262 netif_carrier_off(dev);
10263
34f80b04 10264 bp->common.name = board_info[ent->driver_data].name;
25047950 10265 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10266 " IRQ %d, ", dev->name, bp->common.name,
10267 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10268 bnx2x_get_pcie_width(bp),
10269 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10270 dev->base_addr, bp->pdev->irq);
e174961c 10271 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10272 return 0;
10273
10274init_one_exit:
10275 if (bp->regview)
10276 iounmap(bp->regview);
10277
10278 if (bp->doorbells)
10279 iounmap(bp->doorbells);
10280
10281 free_netdev(dev);
10282
10283 if (atomic_read(&pdev->enable_cnt) == 1)
10284 pci_release_regions(pdev);
10285
10286 pci_disable_device(pdev);
10287 pci_set_drvdata(pdev, NULL);
10288
10289 return rc;
10290}
10291
10292static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10293{
10294 struct net_device *dev = pci_get_drvdata(pdev);
10295 struct bnx2x *bp;
10296
10297 if (!dev) {
10298 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10299 return;
10300 }
228241eb 10301 bp = netdev_priv(dev);
a2fbb9ea 10302
10303 unregister_netdev(dev);
10304
10305 if (bp->regview)
10306 iounmap(bp->regview);
10307
10308 if (bp->doorbells)
10309 iounmap(bp->doorbells);
10310
10311 free_netdev(dev);
34f80b04
EG
10312
10313 if (atomic_read(&pdev->enable_cnt) == 1)
10314 pci_release_regions(pdev);
10315
a2fbb9ea
ET
10316 pci_disable_device(pdev);
10317 pci_set_drvdata(pdev, NULL);
10318}
10319
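/* PM suspend hook: under rtnl, detach the device, unload the NIC and
 * move the chip into the PCI power state chosen by the PM core.
 */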
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

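/* PM resume hook: under rtnl, restore PCI state, bring the chip back
 * to D0 and reload the NIC if the interface was running at suspend.
 */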
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

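/* Stripped-down unload path used after a fatal PCI error: frees IRQs,
 * SKBs, SGEs and driver memory without the normal ramrod/MCP unload
 * handshake, since the device may no longer be reachable.
 */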
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

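/* Re-establish contact with the MCP after a slot reset: re-read the
 * shmem base and the firmware sequence number, or set NO_MCP_FLAG if
 * the management CPU looks inactive.
 */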
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

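/* EEH / PCI error recovery callbacks handed to the PCI core */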
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

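/* Module init: create the driver's workqueue before registering with
 * the PCI core, and tear both down in reverse order on exit.
 */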
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);