/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

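/* counterpart of bnx2x_reg_wr_ind(): reads a register through the
 * PCICFG_GRC address/data window and restores the window afterwards
 */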
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

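/* DMA len32 dwords from host memory (dma_addr) into GRC space (dst_addr).
 * Falls back to indirect writes while the DMAE block is not yet ready,
 * otherwise posts a command and polls the write-back completion word
 * until the engine stamps DMAE_COMP_VAL (up to 200 polls)
 */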
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

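/* scan the assert lists of all four storm processors and print every
 * valid entry; returns the number of asserts found
 */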
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

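/* print the firmware (MCP) trace kept in the MCP scratchpad; 'mark'
 * appears to split the buffer into two wrapped halves, which are
 * printed in order as NUL-terminated text
 */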
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

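/* program the HC interrupt configuration for this port: MSI-X or
 * INTA#/MSI line mode plus attention bits, and (on E1H) the
 * leading/trailing edge attention masks
 */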
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

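/* write an IGU ack: report a new status block index for the given storm
 * and select the interrupt mode (e.g. enable/disable) via 'op'
 */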
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

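/* refresh the cached CSTORM/USTORM indices from the status block; the
 * returned bitmask (bit 0 - CSTORM, bit 1 - USTORM) tells the caller
 * whether there is new work
 */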
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

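/* number of free TX BDs; the NUM_TX_RINGS "next-page" BDs are counted
 * as permanently used so they are never handed out
 */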
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries;
	   it is used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

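/* handle a ramrod completion reported on the RCQ and advance the
 * matching fastpath/driver state machine
 */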
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

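/* allocate a PAGES_PER_SGE compound page, map it for DMA and publish
 * its address in the SGE ring entry at 'index'
 */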
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

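/* walk the SGL of a TPA completion: clear the mask bits of the pages
 * the FW consumed, then advance rx_sge_prod over every fully-consumed
 * mask element
 */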
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

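/* start a TPA aggregation: park the partially filled skb from 'cons' in
 * the per-queue tpa_pool bin and put the pre-allocated pool skb on the
 * 'prod' BD in its place
 */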
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

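/* attach the SGE pages of a completed aggregation to 'skb' as page
 * frags, replenishing each consumed SGE; on allocation failure the
 * packet is dropped
 */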
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

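/* close a TPA bin: fix the IP checksum of the aggregated packet, attach
 * its frags and hand it to the stack, then refill the bin with a fresh
 * skb (or drop the packet if allocation fails)
 */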
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

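/* RX fast-path poll loop: walk the completion queue for up to 'budget'
 * packets, dispatching slow-path CQEs, TPA start/stop events and
 * regular packets, then update the BD/CQE/SGE producers for the FW
 */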
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

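/* acquire one bit of the HW resource lock via the per-function
 * DRIVER_CONTROL registers, retrying for up to 5 seconds
 */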
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

4a37fb66 1744static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1745{
1746 u32 lock_status;
1747 u32 resource_bit = (1 << resource);
4a37fb66
YG
1748 int func = BP_FUNC(bp);
1749 u32 hw_lock_control_reg;
a2fbb9ea 1750
c18487ee
YR
1751 /* Validating that the resource is within range */
1752 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1753 DP(NETIF_MSG_HW,
1754 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1755 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1756 return -EINVAL;
1757 }
1758
4a37fb66
YG
1759 if (func <= 5) {
1760 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1761 } else {
1762 hw_lock_control_reg =
1763 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1764 }
1765
c18487ee 1766 /* Validating that the resource is currently taken */
4a37fb66 1767 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1768 if (!(lock_status & resource_bit)) {
1769 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1770 lock_status, resource_bit);
1771 return -EFAULT;
a2fbb9ea
ET
1772 }
1773
4a37fb66 1774 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1775 return 0;
1776}
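/* Editorial sketch, not part of the driver: how the two helpers above
 * are meant to pair up. Error codes follow the code as written:
 * -EINVAL for an out-of-range resource, -EEXIST if the lock is
 * already taken, -EAGAIN on timeout. The wrapper name is hypothetical.
 */
static int bnx2x_with_hw_lock(struct bnx2x *bp, u32 resource)
{
	int rc = bnx2x_acquire_hw_lock(bp, resource);

	if (rc)
		return rc;

	/* ... touch the resource shared with MCP/the other port ... */

	return bnx2x_release_hw_lock(bp, resource);
}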
1777
1778/* HW Lock for shared dual port PHYs */
4a37fb66 1779static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1780{
1781 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1782
34f80b04 1783 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1784
c18487ee
YR
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1787 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1788}
a2fbb9ea 1789
4a37fb66 1790static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1791{
1792 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1793
c18487ee
YR
1794 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1795 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1796 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1797
34f80b04 1798 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1799}
a2fbb9ea 1800
17de50b7 1801int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1802{
1803 /* The GPIO should be swapped if swap register is set and active */
1804 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1805 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1806 int gpio_shift = gpio_num +
1807 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1808 u32 gpio_mask = (1 << gpio_shift);
1809 u32 gpio_reg;
a2fbb9ea 1810
c18487ee
YR
1811 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1812 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1813 return -EINVAL;
1814 }
a2fbb9ea 1815
4a37fb66 1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1817 /* read GPIO and mask except the float bits */
1818 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1819
c18487ee
YR
1820 switch (mode) {
1821 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1822 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1823 gpio_num, gpio_shift);
1824 /* clear FLOAT and set CLR */
1825 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1826 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1827 break;
a2fbb9ea 1828
c18487ee
YR
1829 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1830 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1831 gpio_num, gpio_shift);
1832 /* clear FLOAT and set SET */
1833 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1834 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1835 break;
a2fbb9ea 1836
17de50b7 1837 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1838 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1839 gpio_num, gpio_shift);
1840 /* set FLOAT */
1841 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1842 break;
a2fbb9ea 1843
c18487ee
YR
1844 default:
1845 break;
a2fbb9ea
ET
1846 }
1847
c18487ee 1848 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1849 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1850
c18487ee 1851 return 0;
a2fbb9ea
ET
1852}
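/* Editorial note, not in the source: a worked example of the shift
 * arithmetic above. With the swap registers clear, gpio_port equals
 * port; for port 1 and gpio_num 2:
 *
 *	gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT
 *	gpio_mask  = 1 << gpio_shift
 *
 * so driving the pin low, as the fan-failure handler further down
 * does, comes out as:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */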
1853
c18487ee 1854static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1855{
c18487ee
YR
1856 u32 spio_mask = (1 << spio_num);
1857 u32 spio_reg;
a2fbb9ea 1858
c18487ee
YR
1859 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1860 (spio_num > MISC_REGISTERS_SPIO_7)) {
1861 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1862 return -EINVAL;
a2fbb9ea
ET
1863 }
1864
4a37fb66 1865 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1866 /* read SPIO and mask except the float bits */
1867 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1868
c18487ee 1869 switch (mode) {
6378c025 1870 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1871 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1872 /* clear FLOAT and set CLR */
1873 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1874 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1875 break;
a2fbb9ea 1876
6378c025 1877 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1878 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1879 /* clear FLOAT and set SET */
1880 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1881 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1882 break;
a2fbb9ea 1883
c18487ee
YR
1884 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1885 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1886 /* set FLOAT */
1887 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1888 break;
a2fbb9ea 1889
c18487ee
YR
1890 default:
1891 break;
a2fbb9ea
ET
1892 }
1893
c18487ee 1894 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1895 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1896
a2fbb9ea
ET
1897 return 0;
1898}
1899
c18487ee 1900static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1901{
ad33ea3a
EG
1902 switch (bp->link_vars.ieee_fc &
1903 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1904 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1906 ADVERTISED_Pause);
1907 break;
1908 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1909 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1910 ADVERTISED_Pause);
1911 break;
1912 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1913 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1914 break;
1915 default:
34f80b04 1916 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1917 ADVERTISED_Pause);
1918 break;
1919 }
1920}
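/* Editorial note, not in the source: the switch above as a table
 * (per the IEEE 802.3 annex 28B pause advertisement bits):
 *
 *	ieee_fc advertisement	bp->port.advertising
 *	PAUSE_NONE		clears Pause and Asym_Pause
 *	PAUSE_BOTH		sets Pause and Asym_Pause
 *	PAUSE_ASYMMETRIC	sets Asym_Pause
 *	anything else		clears both (default case)
 */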
f1410647 1921
c18487ee
YR
1922static void bnx2x_link_report(struct bnx2x *bp)
1923{
1924 if (bp->link_vars.link_up) {
1925 if (bp->state == BNX2X_STATE_OPEN)
1926 netif_carrier_on(bp->dev);
1927 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1928
c18487ee 1929 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1930
c18487ee
YR
1931 if (bp->link_vars.duplex == DUPLEX_FULL)
1932 printk("full duplex");
1933 else
1934 printk("half duplex");
f1410647 1935
c0700f90
DM
1936 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1937 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1938 printk(", receive ");
c0700f90 1939 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1940 printk("& transmit ");
1941 } else {
1942 printk(", transmit ");
1943 }
1944 printk("flow control ON");
1945 }
1946 printk("\n");
f1410647 1947
c18487ee
YR
1948 } else { /* link_down */
1949 netif_carrier_off(bp->dev);
1950 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1951 }
c18487ee
YR
1952}
1953
1954static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1955{
19680c48
EG
1956 if (!BP_NOMCP(bp)) {
1957 u8 rc;
a2fbb9ea 1958
19680c48 1959 /* Initialize link parameters structure variables */
8c99e7b0
YR
1960 /* It is recommended to turn off RX FC for jumbo frames
1961 for better performance */
1962 if (IS_E1HMF(bp))
c0700f90 1963 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1964 else if (bp->dev->mtu > 5000)
c0700f90 1965 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1966 else
c0700f90 1967 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1968
4a37fb66 1969 bnx2x_acquire_phy_lock(bp);
19680c48 1970 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1971 bnx2x_release_phy_lock(bp);
a2fbb9ea 1972
19680c48
EG
1973 if (bp->link_vars.link_up)
1974 bnx2x_link_report(bp);
a2fbb9ea 1975
19680c48 1976 bnx2x_calc_fc_adv(bp);
34f80b04 1977
19680c48
EG
1978 return rc;
1979 }
1980 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1981 return -EINVAL;
a2fbb9ea
ET
1982}
1983
c18487ee 1984static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1985{
19680c48 1986 if (!BP_NOMCP(bp)) {
4a37fb66 1987 bnx2x_acquire_phy_lock(bp);
19680c48 1988 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1989 bnx2x_release_phy_lock(bp);
a2fbb9ea 1990
19680c48
EG
1991 bnx2x_calc_fc_adv(bp);
1992 } else
1993 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 1994}
a2fbb9ea 1995
c18487ee
YR
1996static void bnx2x__link_reset(struct bnx2x *bp)
1997{
19680c48 1998 if (!BP_NOMCP(bp)) {
4a37fb66 1999 bnx2x_acquire_phy_lock(bp);
19680c48 2000 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2001 bnx2x_release_phy_lock(bp);
19680c48
EG
2002 } else
2003 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2004}
a2fbb9ea 2005
c18487ee
YR
2006static u8 bnx2x_link_test(struct bnx2x *bp)
2007{
2008 u8 rc;
a2fbb9ea 2009
4a37fb66 2010 bnx2x_acquire_phy_lock(bp);
c18487ee 2011 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2012 bnx2x_release_phy_lock(bp);
a2fbb9ea 2013
c18487ee
YR
2014 return rc;
2015}
a2fbb9ea 2016
34f80b04
EG
2017/* Calculates the sum of vn_min_rates.
2018 It's needed for further normalizing of the min_rates.
2019
2020 Returns:
2021 sum of vn_min_rates
2022 or
2023 0 - if all the min_rates are 0.
33471629 2024 In the latter case the fairness algorithm should be deactivated.
34f80b04
EG
2025 If not all min_rates are zero then those that are zeroes will
2026 be set to 1.
2027 */
2028static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2029{
2030 int i, port = BP_PORT(bp);
2031 u32 wsum = 0;
2032 int all_zero = 1;
2033
2034 for (i = 0; i < E1HVN_MAX; i++) {
2035 u32 vn_cfg =
2036 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2037 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2038 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2039 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2040 /* If min rate is zero - set it to 1 */
2041 if (!vn_min_rate)
2042 vn_min_rate = DEF_MIN_RATE;
2043 else
2044 all_zero = 0;
2045
2046 wsum += vn_min_rate;
2047 }
2048 }
2049
2050 /* ... only if all min rates are zeros - disable FAIRNESS */
2051 if (all_zero)
2052 return 0;
2053
2054 return wsum;
2055}
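/* Editorial note, not in the source: worked example of the sum above,
 * assuming no function is hidden. If the vnics are configured with
 * min rates of 2500, 0, 0 and 7500 Mbps, the two zero entries are
 * bumped to DEF_MIN_RATE and
 *
 *	wsum = 2500 + 7500 + 2 * DEF_MIN_RATE
 *
 * Only when all four are zero does the function return 0, which the
 * caller turns into fairness being disabled in
 * bnx2x_init_port_minmax() below.
 */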
2056
2057static void bnx2x_init_port_minmax(struct bnx2x *bp,
2058 int en_fness,
2059 u16 port_rate,
2060 struct cmng_struct_per_port *m_cmng_port)
2061{
2062 u32 r_param = port_rate / 8;
2063 int port = BP_PORT(bp);
2064 int i;
2065
2066 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2067
2068 /* Enable minmax only if we are in e1hmf mode */
2069 if (IS_E1HMF(bp)) {
2070 u32 fair_periodic_timeout_usec;
2071 u32 t_fair;
2072
2073 /* Enable rate shaping and fairness */
2074 m_cmng_port->flags.cmng_vn_enable = 1;
2075 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2076 m_cmng_port->flags.rate_shaping_enable = 1;
2077
2078 if (!en_fness)
2079 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2080 " fairness will be disabled\n");
2081
2082 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2083 m_cmng_port->rs_vars.rs_periodic_timeout =
2084 RS_PERIODIC_TIMEOUT_USEC / 4;
2085
2086 /* this is the threshold below which no timer arming will occur
2087 the 1.25 coefficient makes the threshold a little bigger
2088 than the real time, to compensate for timer inaccuracy */
2089 m_cmng_port->rs_vars.rs_threshold =
2090 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2091
2092 /* resolution of fairness timer */
2093 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2094 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2095 t_fair = T_FAIR_COEF / port_rate;
2096
2097 /* this is the threshold below which we won't arm
2098 the timer anymore */
2099 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2100
2101 /* we multiply by 1e3/8 to get bytes/msec.
2102 We don't want the credits to exceed
2103 T_FAIR*FAIR_MEM (the algorithm resolution) */
2104 m_cmng_port->fair_vars.upper_bound =
2105 r_param * t_fair * FAIR_MEM;
2106 /* since each tick is 4 usec */
2107 m_cmng_port->fair_vars.fairness_timeout =
2108 fair_periodic_timeout_usec / 4;
2109
2110 } else {
2111 /* Disable rate shaping and fairness */
2112 m_cmng_port->flags.cmng_vn_enable = 0;
2113 m_cmng_port->flags.fairness_enable = 0;
2114 m_cmng_port->flags.rate_shaping_enable = 0;
2115
2116 DP(NETIF_MSG_IFUP,
2117 "Single function mode minmax will be disabled\n");
2118 }
2119
2120 /* Store it to internal memory */
2121 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2122 REG_WR(bp, BAR_XSTRORM_INTMEM +
2123 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2124 ((u32 *)(m_cmng_port))[i]);
2125}
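/* Editorial note, not in the source: worked numbers for a 10G port,
 * taking RS_PERIODIC_TIMEOUT_USEC == 100 (the "100 usec" in the
 * comment above) and t_fair == 1000 usec at 10G:
 *
 *	r_param             = 10000 / 8          = 1250 bytes/usec
 *	rs_periodic_timeout = 100 / 4            = 25 SDM ticks
 *	rs_threshold        = (100 * 1250 * 5)/4 = 156250 bytes
 *
 * i.e. the 1.25 "inaccuracy" coefficient appears as the *5/4.
 */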
2126
2127static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2128 u32 wsum, u16 port_rate,
2129 struct cmng_struct_per_port *m_cmng_port)
2130{
2131 struct rate_shaping_vars_per_vn m_rs_vn;
2132 struct fairness_vars_per_vn m_fair_vn;
2133 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2134 u16 vn_min_rate, vn_max_rate;
2135 int i;
2136
2137 /* If function is hidden - set min and max to zeroes */
2138 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2139 vn_min_rate = 0;
2140 vn_max_rate = 0;
2141
2142 } else {
2143 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2144 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2145 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2146 the current min rate is zero - set it to 1.
33471629 2147 This is a requirement of the algorithm. */
34f80b04
EG
2148 if ((vn_min_rate == 0) && wsum)
2149 vn_min_rate = DEF_MIN_RATE;
2150 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2151 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2152 }
2153
2154 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2155 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2156
2157 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2158 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2159
2160 /* global vn counter - maximal Mbps for this vn */
2161 m_rs_vn.vn_counter.rate = vn_max_rate;
2162
2163 /* quota - number of bytes transmitted in this period */
2164 m_rs_vn.vn_counter.quota =
2165 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2166
2167#ifdef BNX2X_PER_PROT_QOS
2168 /* per protocol counter */
2169 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2170 /* maximal Mbps for this protocol */
2171 m_rs_vn.protocol_counters[protocol].rate =
2172 protocol_max_rate[protocol];
2173 /* the quota in each timer period -
2174 number of bytes transmitted in this period */
2175 m_rs_vn.protocol_counters[protocol].quota =
2176 (u32)(rs_periodic_timeout_usec *
2177 ((double)m_rs_vn.
2178 protocol_counters[protocol].rate/8));
2179 }
2180#endif
2181
2182 if (wsum) {
2183 /* credit for each period of the fairness algorithm:
2184 number of bytes in T_FAIR (the vns share the port rate).
2185 wsum should not be larger than 10000, thus
2186 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2187 m_fair_vn.vn_credit_delta =
2188 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2189 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2190 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2191 m_fair_vn.vn_credit_delta);
2192 }
2193
2194#ifdef BNX2X_PER_PROT_QOS
2195 do {
2196 u32 protocolWeightSum = 0;
2197
2198 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2199 protocolWeightSum +=
2200 drvInit.protocol_min_rate[protocol];
2201 /* per protocol counter -
2202 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2203 if (protocolWeightSum > 0) {
2204 for (protocol = 0;
2205 protocol < NUM_OF_PROTOCOLS; protocol++)
2206 /* credit for each period of the
2207 fairness algorithm - number of bytes in
2208 T_FAIR (the protocols share the vn rate) */
2209 m_fair_vn.protocol_credit_delta[protocol] =
2210 (u32)((vn_min_rate / 8) * t_fair *
2211 protocol_min_rate / protocolWeightSum);
2212 }
2213 } while (0);
2214#endif
2215
2216 /* Store it to internal memory */
2217 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2218 REG_WR(bp, BAR_XSTRORM_INTMEM +
2219 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2220 ((u32 *)(&m_rs_vn))[i]);
2221
2222 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2223 REG_WR(bp, BAR_XSTRORM_INTMEM +
2224 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2225 ((u32 *)(&m_fair_vn))[i]);
2226}
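/* Editorial note, not in the source: the quota above in numbers.
 * With vn_max_rate = 2500 Mbps and a 100 usec rate-shaping period:
 *
 *	quota = (2500 * 100) / 8 = 31250 bytes per period
 *
 * which sustains exactly 2.5 Gbps, since Mbps * usec conveniently
 * gives bits with no scale factor.
 */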
2227
c18487ee
YR
2228/* This function is called upon link interrupt */
2229static void bnx2x_link_attn(struct bnx2x *bp)
2230{
34f80b04
EG
2231 int vn;
2232
bb2a0f7a
YG
2233 /* Make sure that we are synced with the current statistics */
2234 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2235
4a37fb66 2236 bnx2x_acquire_phy_lock(bp);
c18487ee 2237 bnx2x_link_update(&bp->link_params, &bp->link_vars);
4a37fb66 2238 bnx2x_release_phy_lock(bp);
a2fbb9ea 2239
bb2a0f7a
YG
2240 if (bp->link_vars.link_up) {
2241
2242 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2243 struct host_port_stats *pstats;
2244
2245 pstats = bnx2x_sp(bp, port_stats);
2246 /* reset old bmac stats */
2247 memset(&(pstats->mac_stx[0]), 0,
2248 sizeof(struct mac_stx));
2249 }
2250 if ((bp->state == BNX2X_STATE_OPEN) ||
2251 (bp->state == BNX2X_STATE_DISABLED))
2252 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2253 }
2254
c18487ee
YR
2255 /* indicate link status */
2256 bnx2x_link_report(bp);
34f80b04
EG
2257
2258 if (IS_E1HMF(bp)) {
2259 int func;
2260
2261 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2262 if (vn == BP_E1HVN(bp))
2263 continue;
2264
2265 func = ((vn << 1) | BP_PORT(bp));
2266
2267 /* Set the attention towards other drivers
2268 on the same port */
2269 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2270 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2271 }
2272 }
2273
2274 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2275 struct cmng_struct_per_port m_cmng_port;
2276 u32 wsum;
2277 int port = BP_PORT(bp);
2278
2279 /* Init RATE SHAPING and FAIRNESS contexts */
2280 wsum = bnx2x_calc_vn_wsum(bp);
2281 bnx2x_init_port_minmax(bp, (int)wsum,
2282 bp->link_vars.line_speed,
2283 &m_cmng_port);
2284 if (IS_E1HMF(bp))
2285 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2286 bnx2x_init_vn_minmax(bp, 2*vn + port,
2287 wsum, bp->link_vars.line_speed,
2288 &m_cmng_port);
2289 }
c18487ee 2290}
a2fbb9ea 2291
c18487ee
YR
2292static void bnx2x__link_status_update(struct bnx2x *bp)
2293{
2294 if (bp->state != BNX2X_STATE_OPEN)
2295 return;
a2fbb9ea 2296
c18487ee 2297 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2298
bb2a0f7a
YG
2299 if (bp->link_vars.link_up)
2300 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301 else
2302 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2303
c18487ee
YR
2304 /* indicate link status */
2305 bnx2x_link_report(bp);
a2fbb9ea 2306}
a2fbb9ea 2307
34f80b04
EG
2308static void bnx2x_pmf_update(struct bnx2x *bp)
2309{
2310 int port = BP_PORT(bp);
2311 u32 val;
2312
2313 bp->port.pmf = 1;
2314 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2315
2316 /* enable nig attention */
2317 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2318 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2319 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2320
2321 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2322}
2323
c18487ee 2324/* end of Link */
a2fbb9ea
ET
2325
2326/* slow path */
2327
2328/*
2329 * General service functions
2330 */
2331
2332/* the slow path queue is odd since completions arrive on the fastpath ring */
2333static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2334 u32 data_hi, u32 data_lo, int common)
2335{
34f80b04 2336 int func = BP_FUNC(bp);
a2fbb9ea 2337
34f80b04
EG
2338 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2339 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2340 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2341 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2342 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2343
2344#ifdef BNX2X_STOP_ON_ERROR
2345 if (unlikely(bp->panic))
2346 return -EIO;
2347#endif
2348
34f80b04 2349 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2350
2351 if (!bp->spq_left) {
2352 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2353 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2354 bnx2x_panic();
2355 return -EBUSY;
2356 }
f1410647 2357
a2fbb9ea
ET
2358 /* CID needs the port number to be encoded in it */
2359 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2360 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2361 HW_CID(bp, cid)));
2362 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2363 if (common)
2364 bp->spq_prod_bd->hdr.type |=
2365 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2366
2367 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2368 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2369
2370 bp->spq_left--;
2371
2372 if (bp->spq_prod_bd == bp->spq_last_bd) {
2373 bp->spq_prod_bd = bp->spq;
2374 bp->spq_prod_idx = 0;
2375 DP(NETIF_MSG_TIMER, "end of spq\n");
2376
2377 } else {
2378 bp->spq_prod_bd++;
2379 bp->spq_prod_idx++;
2380 }
2381
34f80b04 2382 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2383 bp->spq_prod_idx);
2384
34f80b04 2385 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2386 return 0;
2387}
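/* Editorial sketch, not part of the driver: the calling convention of
 * bnx2x_sp_post() above. The payload is just two 32-bit words; real
 * callers (see bnx2x_storm_stats_post() below) pass whatever the
 * ramrod expects. The wrapper name and the use of a DMA address here
 * are hypothetical.
 */
static int bnx2x_post_example(struct bnx2x *bp, dma_addr_t mapping)
{
	/* ETH (non-common) ramrod on connection 0 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			     U64_HI(mapping), U64_LO(mapping), 0);
}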
2388
2389/* acquire split MCP access lock register */
4a37fb66 2390static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2391{
a2fbb9ea 2392 u32 i, j, val;
34f80b04 2393 int rc = 0;
a2fbb9ea
ET
2394
2395 might_sleep();
2396 i = 100;
2397 for (j = 0; j < i*10; j++) {
2398 val = (1UL << 31);
2399 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2400 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2401 if (val & (1L << 31))
2402 break;
2403
2404 msleep(5);
2405 }
a2fbb9ea 2406 if (!(val & (1L << 31))) {
19680c48 2407 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2408 rc = -EBUSY;
2409 }
2410
2411 return rc;
2412}
2413
4a37fb66
YG
2414/* release split MCP access lock register */
2415static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2416{
2417 u32 val = 0;
2418
2419 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420}
2421
2422static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2423{
2424 struct host_def_status_block *def_sb = bp->def_status_blk;
2425 u16 rc = 0;
2426
2427 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2428 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2429 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2430 rc |= 1;
2431 }
2432 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2433 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2434 rc |= 2;
2435 }
2436 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2437 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2438 rc |= 4;
2439 }
2440 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2441 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2442 rc |= 8;
2443 }
2444 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2445 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2446 rc |= 16;
2447 }
2448 return rc;
2449}
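/* Editorial note, not in the source: the value returned above is a
 * bitmask over the default status block sections - bit 0 attention,
 * bit 1 CSTORM, bit 2 USTORM, bit 3 XSTORM, bit 4 TSTORM. That is
 * how bnx2x_sp_task() below reads it: status & 0x1 means attentions
 * changed, status & 0x2 means a CStorm (statistics) event.
 */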
2450
2451/*
2452 * slow path service functions
2453 */
2454
2455static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2456{
34f80b04 2457 int port = BP_PORT(bp);
5c862848
EG
2458 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2459 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2460 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2461 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2462 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2463 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2464 u32 aeu_mask;
a2fbb9ea 2465
a2fbb9ea
ET
2466 if (bp->attn_state & asserted)
2467 BNX2X_ERR("IGU ERROR\n");
2468
3fcaf2e5
EG
2469 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2470 aeu_mask = REG_RD(bp, aeu_addr);
2471
a2fbb9ea 2472 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2473 aeu_mask, asserted);
2474 aeu_mask &= ~(asserted & 0xff);
2475 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2476
3fcaf2e5
EG
2477 REG_WR(bp, aeu_addr, aeu_mask);
2478 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2479
3fcaf2e5 2480 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2481 bp->attn_state |= asserted;
3fcaf2e5 2482 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2483
2484 if (asserted & ATTN_HARD_WIRED_MASK) {
2485 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2486
877e9aa4
ET
2487 /* save nig interrupt mask */
2488 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2489 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2490
c18487ee 2491 bnx2x_link_attn(bp);
a2fbb9ea
ET
2492
2493 /* handle unicore attn? */
2494 }
2495 if (asserted & ATTN_SW_TIMER_4_FUNC)
2496 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2497
2498 if (asserted & GPIO_2_FUNC)
2499 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2500
2501 if (asserted & GPIO_3_FUNC)
2502 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2503
2504 if (asserted & GPIO_4_FUNC)
2505 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2506
2507 if (port == 0) {
2508 if (asserted & ATTN_GENERAL_ATTN_1) {
2509 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2510 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2511 }
2512 if (asserted & ATTN_GENERAL_ATTN_2) {
2513 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2514 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2515 }
2516 if (asserted & ATTN_GENERAL_ATTN_3) {
2517 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2518 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2519 }
2520 } else {
2521 if (asserted & ATTN_GENERAL_ATTN_4) {
2522 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2524 }
2525 if (asserted & ATTN_GENERAL_ATTN_5) {
2526 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2527 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2528 }
2529 if (asserted & ATTN_GENERAL_ATTN_6) {
2530 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2531 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2532 }
2533 }
2534
2535 } /* if hardwired */
2536
5c862848
EG
2537 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2538 asserted, hc_addr);
2539 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2540
2541 /* now set back the mask */
2542 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2543 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2544}
2545
877e9aa4 2546static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2547{
34f80b04 2548 int port = BP_PORT(bp);
877e9aa4
ET
2549 int reg_offset;
2550 u32 val;
2551
34f80b04
EG
2552 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2553 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2554
34f80b04 2555 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2556
2557 val = REG_RD(bp, reg_offset);
2558 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2559 REG_WR(bp, reg_offset, val);
2560
2561 BNX2X_ERR("SPIO5 hw attention\n");
2562
34f80b04 2563 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2564 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2565 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2566 /* Fan failure attention */
2567
17de50b7 2568 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2569 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2570 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2571 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2572 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2573 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2574 /* mark the failure */
c18487ee 2575 bp->link_params.ext_phy_config &=
877e9aa4 2576 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2577 bp->link_params.ext_phy_config |=
877e9aa4
ET
2578 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2579 SHMEM_WR(bp,
2580 dev_info.port_hw_config[port].
2581 external_phy_config,
c18487ee 2582 bp->link_params.ext_phy_config);
877e9aa4
ET
2583 /* log the failure */
2584 printk(KERN_ERR PFX "Fan Failure on Network"
2585 " Controller %s has caused the driver to"
2586 " shutdown the card to prevent permanent"
2587 " damage. Please contact Dell Support for"
2588 " assistance\n", bp->dev->name);
2589 break;
2590
2591 default:
2592 break;
2593 }
2594 }
34f80b04
EG
2595
2596 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2597
2598 val = REG_RD(bp, reg_offset);
2599 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2600 REG_WR(bp, reg_offset, val);
2601
2602 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2603 (attn & HW_INTERRUT_ASSERT_SET_0));
2604 bnx2x_panic();
2605 }
877e9aa4
ET
2606}
2607
2608static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2609{
2610 u32 val;
2611
2612 if (attn & BNX2X_DOORQ_ASSERT) {
2613
2614 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2615 BNX2X_ERR("DB hw attention 0x%x\n", val);
2616 /* DORQ discard attention */
2617 if (val & 0x2)
2618 BNX2X_ERR("FATAL error from DORQ\n");
2619 }
34f80b04
EG
2620
2621 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2622
2623 int port = BP_PORT(bp);
2624 int reg_offset;
2625
2626 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2627 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2628
2629 val = REG_RD(bp, reg_offset);
2630 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2631 REG_WR(bp, reg_offset, val);
2632
2633 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2634 (attn & HW_INTERRUT_ASSERT_SET_1));
2635 bnx2x_panic();
2636 }
877e9aa4
ET
2637}
2638
2639static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2640{
2641 u32 val;
2642
2643 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2644
2645 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2646 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2647 /* CFC error attention */
2648 if (val & 0x2)
2649 BNX2X_ERR("FATAL error from CFC\n");
2650 }
2651
2652 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2653
2654 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2655 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2656 /* RQ_USDMDP_FIFO_OVERFLOW */
2657 if (val & 0x18000)
2658 BNX2X_ERR("FATAL error from PXP\n");
2659 }
34f80b04
EG
2660
2661 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2662
2663 int port = BP_PORT(bp);
2664 int reg_offset;
2665
2666 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2667 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2668
2669 val = REG_RD(bp, reg_offset);
2670 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2671 REG_WR(bp, reg_offset, val);
2672
2673 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2674 (attn & HW_INTERRUT_ASSERT_SET_2));
2675 bnx2x_panic();
2676 }
877e9aa4
ET
2677}
2678
2679static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2680{
34f80b04
EG
2681 u32 val;
2682
877e9aa4
ET
2683 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2684
34f80b04
EG
2685 if (attn & BNX2X_PMF_LINK_ASSERT) {
2686 int func = BP_FUNC(bp);
2687
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2689 bnx2x__link_status_update(bp);
2690 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2691 DRV_STATUS_PMF)
2692 bnx2x_pmf_update(bp);
2693
2694 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2695
2696 BNX2X_ERR("MC assert!\n");
2697 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2698 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2699 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2700 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2701 bnx2x_panic();
2702
2703 } else if (attn & BNX2X_MCP_ASSERT) {
2704
2705 BNX2X_ERR("MCP assert!\n");
2706 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2707 bnx2x_fw_dump(bp);
877e9aa4
ET
2708
2709 } else
2710 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2711 }
2712
2713 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2714 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2715 if (attn & BNX2X_GRC_TIMEOUT) {
2716 val = CHIP_IS_E1H(bp) ?
2717 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2718 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2719 }
2720 if (attn & BNX2X_GRC_RSV) {
2721 val = CHIP_IS_E1H(bp) ?
2722 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2723 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2724 }
877e9aa4 2725 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2726 }
2727}
2728
2729static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2730{
a2fbb9ea
ET
2731 struct attn_route attn;
2732 struct attn_route group_mask;
34f80b04 2733 int port = BP_PORT(bp);
877e9aa4 2734 int index;
a2fbb9ea
ET
2735 u32 reg_addr;
2736 u32 val;
3fcaf2e5 2737 u32 aeu_mask;
a2fbb9ea
ET
2738
2739 /* need to take HW lock because MCP or other port might also
2740 try to handle this event */
4a37fb66 2741 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2742
2743 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2744 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2745 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2746 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2747 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2748 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2749
2750 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2751 if (deasserted & (1 << index)) {
2752 group_mask = bp->attn_group[index];
2753
34f80b04
EG
2754 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2755 index, group_mask.sig[0], group_mask.sig[1],
2756 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2757
877e9aa4
ET
2758 bnx2x_attn_int_deasserted3(bp,
2759 attn.sig[3] & group_mask.sig[3]);
2760 bnx2x_attn_int_deasserted1(bp,
2761 attn.sig[1] & group_mask.sig[1]);
2762 bnx2x_attn_int_deasserted2(bp,
2763 attn.sig[2] & group_mask.sig[2]);
2764 bnx2x_attn_int_deasserted0(bp,
2765 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2766
a2fbb9ea
ET
2767 if ((attn.sig[0] & group_mask.sig[0] &
2768 HW_PRTY_ASSERT_SET_0) ||
2769 (attn.sig[1] & group_mask.sig[1] &
2770 HW_PRTY_ASSERT_SET_1) ||
2771 (attn.sig[2] & group_mask.sig[2] &
2772 HW_PRTY_ASSERT_SET_2))
6378c025 2773 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2774 }
2775 }
2776
4a37fb66 2777 bnx2x_release_alr(bp);
a2fbb9ea 2778
5c862848 2779 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2780
2781 val = ~deasserted;
3fcaf2e5
EG
2782 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2783 val, reg_addr);
5c862848 2784 REG_WR(bp, reg_addr, val);
a2fbb9ea 2785
a2fbb9ea 2786 if (~bp->attn_state & deasserted)
3fcaf2e5 2787 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2788
2789 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2790 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2791
3fcaf2e5
EG
2792 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2793 aeu_mask = REG_RD(bp, reg_addr);
2794
2795 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2796 aeu_mask, deasserted);
2797 aeu_mask |= (deasserted & 0xff);
2798 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2799
3fcaf2e5
EG
2800 REG_WR(bp, reg_addr, aeu_mask);
2801 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2802
2803 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2804 bp->attn_state &= ~deasserted;
2805 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2806}
2807
2808static void bnx2x_attn_int(struct bnx2x *bp)
2809{
2810 /* read local copy of bits */
2811 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2812 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2813 u32 attn_state = bp->attn_state;
2814
2815 /* look for changed bits */
2816 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2817 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2818
2819 DP(NETIF_MSG_HW,
2820 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2821 attn_bits, attn_ack, asserted, deasserted);
2822
2823 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2824 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2825
2826 /* handle bits that were raised */
2827 if (asserted)
2828 bnx2x_attn_int_asserted(bp, asserted);
2829
2830 if (deasserted)
2831 bnx2x_attn_int_deasserted(bp, deasserted);
2832}
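/* Editorial note, not in the source: the edge detection above on
 * concrete bits. With attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1:
 *
 *	asserted   =  0x5 & ~0x1 & ~0x1 = 0x4	(newly raised)
 *	deasserted = ~0x5 &  0x1 &  0x1 = 0x0	(nothing cleared)
 *
 * so only bnx2x_attn_int_asserted() runs, for bit 2 alone.
 */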
2833
2834static void bnx2x_sp_task(struct work_struct *work)
2835{
1cf167f2 2836 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2837 u16 status;
2838
34f80b04 2839
a2fbb9ea
ET
2840 /* Return here if interrupt is disabled */
2841 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2842 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2843 return;
2844 }
2845
2846 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2847/* if (status == 0) */
2848/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2849
3196a88a 2850 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2851
877e9aa4
ET
2852 /* HW attentions */
2853 if (status & 0x1)
a2fbb9ea 2854 bnx2x_attn_int(bp);
a2fbb9ea 2855
bb2a0f7a
YG
2856 /* CStorm events: query_stats, port delete ramrod */
2857 if (status & 0x2)
2858 bp->stats_pending = 0;
2859
a2fbb9ea
ET
2860 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2861 IGU_INT_NOP, 1);
2862 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2863 IGU_INT_NOP, 1);
2864 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2865 IGU_INT_NOP, 1);
2866 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2867 IGU_INT_NOP, 1);
2868 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2869 IGU_INT_ENABLE, 1);
877e9aa4 2870
a2fbb9ea
ET
2871}
2872
2873static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2874{
2875 struct net_device *dev = dev_instance;
2876 struct bnx2x *bp = netdev_priv(dev);
2877
2878 /* Return here if interrupt is disabled */
2879 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2880 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2881 return IRQ_HANDLED;
2882 }
2883
877e9aa4 2884 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2885
2886#ifdef BNX2X_STOP_ON_ERROR
2887 if (unlikely(bp->panic))
2888 return IRQ_HANDLED;
2889#endif
2890
1cf167f2 2891 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2892
2893 return IRQ_HANDLED;
2894}
2895
2896/* end of slow path */
2897
2898/* Statistics */
2899
2900/****************************************************************************
2901* Macros
2902****************************************************************************/
2903
a2fbb9ea
ET
2904/* sum[hi:lo] += add[hi:lo] */
2905#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2906 do { \
2907 s_lo += a_lo; \
2908 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2909 } while (0)
2910
2911/* difference = minuend - subtrahend */
2912#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2913 do { \
bb2a0f7a
YG
2914 if (m_lo < s_lo) { \
2915 /* underflow */ \
a2fbb9ea 2916 d_hi = m_hi - s_hi; \
bb2a0f7a 2917 if (d_hi > 0) { \
6378c025 2918 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2919 d_hi--; \
2920 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2921 } else { \
6378c025 2922 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2923 d_hi = 0; \
2924 d_lo = 0; \
2925 } \
bb2a0f7a
YG
2926 } else { \
2927 /* m_lo >= s_lo */ \
a2fbb9ea 2928 if (m_hi < s_hi) { \
bb2a0f7a
YG
2929 d_hi = 0; \
2930 d_lo = 0; \
2931 } else { \
6378c025 2932 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2933 d_hi = m_hi - s_hi; \
2934 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2935 } \
2936 } \
2937 } while (0)
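/* Editorial sketch, not part of the driver: ADD_64's carry on
 * concrete numbers. Note the ternary needs its own parentheses,
 * since '?:' binds more loosely than '+'.
 */
static inline void bnx2x_add64_example(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* s_lo wrapped to 0, so the compare supplies the carry:
	   now s_hi == 1, s_lo == 0, i.e. 0x1_00000000 */
}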
2938
bb2a0f7a 2939#define UPDATE_STAT64(s, t) \
a2fbb9ea 2940 do { \
bb2a0f7a
YG
2941 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2942 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2943 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2944 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2945 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2946 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2947 } while (0)
2948
bb2a0f7a 2949#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2950 do { \
bb2a0f7a
YG
2951 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2952 diff.lo, new->s##_lo, old->s##_lo); \
2953 ADD_64(estats->t##_hi, diff.hi, \
2954 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2955 } while (0)
2956
2957/* sum[hi:lo] += add */
2958#define ADD_EXTEND_64(s_hi, s_lo, a) \
2959 do { \
2960 s_lo += a; \
2961 s_hi += (s_lo < a) ? 1 : 0; \
2962 } while (0)
2963
bb2a0f7a 2964#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2965 do { \
bb2a0f7a
YG
2966 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2967 pstats->mac_stx[1].s##_lo, \
2968 new->s); \
a2fbb9ea
ET
2969 } while (0)
2970
bb2a0f7a 2971#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2972 do { \
2973 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2974 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2975 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2976 } while (0)
2977
2978#define UPDATE_EXTEND_XSTAT(s, t) \
2979 do { \
2980 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2981 old_xclient->s = le32_to_cpu(xclient->s); \
2982 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2983 } while (0)
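/* Editorial note, not in the source: what the token pasting above
 * produces. UPDATE_EXTEND_TSTAT(rcv_broadcast_bytes, total_bytes)
 * would expand, roughly, to:
 *
 *	diff = le32_to_cpu(tclient->rcv_broadcast_bytes) -
 *	       old_tclient->rcv_broadcast_bytes;
 *	old_tclient->rcv_broadcast_bytes =
 *	       le32_to_cpu(tclient->rcv_broadcast_bytes);
 *	ADD_EXTEND_64(fstats->total_bytes_hi,
 *		      fstats->total_bytes_lo, diff);
 *
 * Both field names here are illustrative, not taken from the HSI.
 */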
2984
2985/*
2986 * General service functions
2987 */
2988
2989static inline long bnx2x_hilo(u32 *hiref)
2990{
2991 u32 lo = *(hiref + 1);
2992#if (BITS_PER_LONG == 64)
2993 u32 hi = *hiref;
2994
2995 return HILO_U64(hi, lo);
2996#else
2997 return lo;
2998#endif
2999}
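/* Editorial note, not in the source: on a 64-bit kernel the helper
 * above returns the full HILO_U64(hi, lo) value; on 32-bit it
 * deliberately returns only the low word, since the net_device_stats
 * counters it feeds are longs.
 */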
3000
3001/*
3002 * Init service functions
3003 */
3004
bb2a0f7a
YG
3005static void bnx2x_storm_stats_post(struct bnx2x *bp)
3006{
3007 if (!bp->stats_pending) {
3008 struct eth_query_ramrod_data ramrod_data = {0};
3009 int rc;
3010
3011 ramrod_data.drv_counter = bp->stats_counter++;
3012 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3013 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3014
3015 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3016 ((u32 *)&ramrod_data)[1],
3017 ((u32 *)&ramrod_data)[0], 0);
3018 if (rc == 0) {
3019 /* stats ramrod has its own slot on the spq */
3020 bp->spq_left++;
3021 bp->stats_pending = 1;
3022 }
3023 }
3024}
3025
3026static void bnx2x_stats_init(struct bnx2x *bp)
3027{
3028 int port = BP_PORT(bp);
3029
3030 bp->executer_idx = 0;
3031 bp->stats_counter = 0;
3032
3033 /* port stats */
3034 if (!BP_NOMCP(bp))
3035 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3036 else
3037 bp->port.port_stx = 0;
3038 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3039
3040 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3041 bp->port.old_nig_stats.brb_discard =
3042 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3043 bp->port.old_nig_stats.brb_truncate =
3044 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3045 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3046 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3047 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3048 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3049
3050 /* function stats */
3051 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3052 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3053 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3054 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3055
3056 bp->stats_state = STATS_STATE_DISABLED;
3057 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3058 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3059}
3060
3061static void bnx2x_hw_stats_post(struct bnx2x *bp)
3062{
3063 struct dmae_command *dmae = &bp->stats_dmae;
3064 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3065
3066 *stats_comp = DMAE_COMP_VAL;
3067
3068 /* loader */
3069 if (bp->executer_idx) {
3070 int loader_idx = PMF_DMAE_C(bp);
3071
3072 memset(dmae, 0, sizeof(struct dmae_command));
3073
3074 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3075 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3076 DMAE_CMD_DST_RESET |
3077#ifdef __BIG_ENDIAN
3078 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3079#else
3080 DMAE_CMD_ENDIANITY_DW_SWAP |
3081#endif
3082 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3083 DMAE_CMD_PORT_0) |
3084 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3085 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3086 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3087 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3088 sizeof(struct dmae_command) *
3089 (loader_idx + 1)) >> 2;
3090 dmae->dst_addr_hi = 0;
3091 dmae->len = sizeof(struct dmae_command) >> 2;
3092 if (CHIP_IS_E1(bp))
3093 dmae->len--;
3094 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3095 dmae->comp_addr_hi = 0;
3096 dmae->comp_val = 1;
3097
3098 *stats_comp = 0;
3099 bnx2x_post_dmae(bp, dmae, loader_idx);
3100
3101 } else if (bp->func_stx) {
3102 *stats_comp = 0;
3103 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3104 }
3105}
3106
3107static int bnx2x_stats_comp(struct bnx2x *bp)
3108{
3109 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3110 int cnt = 10;
3111
3112 might_sleep();
3113 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3114 if (!cnt) {
3115 BNX2X_ERR("timeout waiting for stats to finish\n");
3116 break;
3117 }
3118 cnt--;
12469401 3119 msleep(1);
bb2a0f7a
YG
3120 }
3121 return 1;
3122}
3123
3124/*
3125 * Statistics service functions
3126 */
3127
3128static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3129{
3130 struct dmae_command *dmae;
3131 u32 opcode;
3132 int loader_idx = PMF_DMAE_C(bp);
3133 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3134
3135 /* sanity */
3136 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3137 BNX2X_ERR("BUG!\n");
3138 return;
3139 }
3140
3141 bp->executer_idx = 0;
3142
3143 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3144 DMAE_CMD_C_ENABLE |
3145 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3146#ifdef __BIG_ENDIAN
3147 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3148#else
3149 DMAE_CMD_ENDIANITY_DW_SWAP |
3150#endif
3151 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3152 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3153
3154 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3156 dmae->src_addr_lo = bp->port.port_stx >> 2;
3157 dmae->src_addr_hi = 0;
3158 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3159 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3160 dmae->len = DMAE_LEN32_RD_MAX;
3161 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3162 dmae->comp_addr_hi = 0;
3163 dmae->comp_val = 1;
3164
3165 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3166 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3167 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3168 dmae->src_addr_hi = 0;
7a9b2557
VZ
3169 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3170 DMAE_LEN32_RD_MAX * 4);
3171 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3172 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3173 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3174 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3175 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3176 dmae->comp_val = DMAE_COMP_VAL;
3177
3178 *stats_comp = 0;
3179 bnx2x_hw_stats_post(bp);
3180 bnx2x_stats_comp(bp);
3181}
3182
3183static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3184{
3185 struct dmae_command *dmae;
34f80b04 3186 int port = BP_PORT(bp);
bb2a0f7a 3187 int vn = BP_E1HVN(bp);
a2fbb9ea 3188 u32 opcode;
bb2a0f7a 3189 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3190 u32 mac_addr;
bb2a0f7a
YG
3191 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3192
3193 /* sanity */
3194 if (!bp->link_vars.link_up || !bp->port.pmf) {
3195 BNX2X_ERR("BUG!\n");
3196 return;
3197 }
a2fbb9ea
ET
3198
3199 bp->executer_idx = 0;
bb2a0f7a
YG
3200
3201 /* MCP */
3202 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3203 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3205#ifdef __BIG_ENDIAN
bb2a0f7a 3206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3207#else
bb2a0f7a 3208 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3209#endif
bb2a0f7a
YG
3210 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3211 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3212
bb2a0f7a 3213 if (bp->port.port_stx) {
a2fbb9ea
ET
3214
3215 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3216 dmae->opcode = opcode;
bb2a0f7a
YG
3217 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3219 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3220 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3221 dmae->len = sizeof(struct host_port_stats) >> 2;
3222 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3223 dmae->comp_addr_hi = 0;
3224 dmae->comp_val = 1;
a2fbb9ea
ET
3225 }
3226
bb2a0f7a
YG
3227 if (bp->func_stx) {
3228
3229 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3230 dmae->opcode = opcode;
3231 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3232 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3233 dmae->dst_addr_lo = bp->func_stx >> 2;
3234 dmae->dst_addr_hi = 0;
3235 dmae->len = sizeof(struct host_func_stats) >> 2;
3236 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3237 dmae->comp_addr_hi = 0;
3238 dmae->comp_val = 1;
a2fbb9ea
ET
3239 }
3240
bb2a0f7a 3241 /* MAC */
a2fbb9ea
ET
3242 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3243 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3244 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3245#ifdef __BIG_ENDIAN
3246 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3247#else
3248 DMAE_CMD_ENDIANITY_DW_SWAP |
3249#endif
bb2a0f7a
YG
3250 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3251 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3252
c18487ee 3253 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3254
3255 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3256 NIG_REG_INGRESS_BMAC0_MEM);
3257
3258 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3259 BIGMAC_REGISTER_TX_STAT_GTBYT */
3260 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3261 dmae->opcode = opcode;
3262 dmae->src_addr_lo = (mac_addr +
3263 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3264 dmae->src_addr_hi = 0;
3265 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3266 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3267 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3268 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3269 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3270 dmae->comp_addr_hi = 0;
3271 dmae->comp_val = 1;
3272
3273 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3274 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276 dmae->opcode = opcode;
3277 dmae->src_addr_lo = (mac_addr +
3278 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3279 dmae->src_addr_hi = 0;
3280 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3281 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3282 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3283 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3284 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3285 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3286 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3287 dmae->comp_addr_hi = 0;
3288 dmae->comp_val = 1;
3289
c18487ee 3290 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3291
3292 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3293
3294 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3295 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296 dmae->opcode = opcode;
3297 dmae->src_addr_lo = (mac_addr +
3298 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3299 dmae->src_addr_hi = 0;
3300 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3301 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3302 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3303 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304 dmae->comp_addr_hi = 0;
3305 dmae->comp_val = 1;
3306
3307 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3308 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3309 dmae->opcode = opcode;
3310 dmae->src_addr_lo = (mac_addr +
3311 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3312 dmae->src_addr_hi = 0;
3313 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3314 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3315 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3316 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3317 dmae->len = 1;
3318 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319 dmae->comp_addr_hi = 0;
3320 dmae->comp_val = 1;
3321
3322 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = opcode;
3325 dmae->src_addr_lo = (mac_addr +
3326 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3327 dmae->src_addr_hi = 0;
3328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3329 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3331 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3332 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3335 dmae->comp_val = 1;
3336 }
3337
3338 /* NIG */
bb2a0f7a
YG
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3342 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3346 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3348 dmae->comp_addr_hi = 0;
3349 dmae->comp_val = 1;
3350
3351 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3352 dmae->opcode = opcode;
3353 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3354 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3355 dmae->src_addr_hi = 0;
3356 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3357 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3358 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3359 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3360 dmae->len = (2*sizeof(u32)) >> 2;
3361 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362 dmae->comp_addr_hi = 0;
3363 dmae->comp_val = 1;
3364
a2fbb9ea
ET
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3367 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3368 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3369#ifdef __BIG_ENDIAN
3370 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3371#else
3372 DMAE_CMD_ENDIANITY_DW_SWAP |
3373#endif
bb2a0f7a
YG
3374 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3375 (vn << DMAE_CMD_E1HVN_SHIFT));
3376 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3377 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3378 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3379 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3380 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3381 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3382 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3383 dmae->len = (2*sizeof(u32)) >> 2;
3384 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3385 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3386 dmae->comp_val = DMAE_COMP_VAL;
3387
3388 *stats_comp = 0;
a2fbb9ea
ET
3389}
3390
bb2a0f7a 3391static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3392{
bb2a0f7a
YG
3393 struct dmae_command *dmae = &bp->stats_dmae;
3394 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3395
bb2a0f7a
YG
3396 /* sanity */
3397 if (!bp->func_stx) {
3398 BNX2X_ERR("BUG!\n");
3399 return;
3400 }
a2fbb9ea 3401
bb2a0f7a
YG
3402 bp->executer_idx = 0;
3403 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3404
bb2a0f7a
YG
3405 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3406 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3407 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3408#ifdef __BIG_ENDIAN
3409 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3410#else
3411 DMAE_CMD_ENDIANITY_DW_SWAP |
3412#endif
3413 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3414 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3415 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3416 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3417 dmae->dst_addr_lo = bp->func_stx >> 2;
3418 dmae->dst_addr_hi = 0;
3419 dmae->len = sizeof(struct host_func_stats) >> 2;
3420 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3421 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3422 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3423
bb2a0f7a
YG
3424 *stats_comp = 0;
3425}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}
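/* UPDATE_STAT64 (defined in bnx2x.h) folds a wrapping 64-bit BigMAC
 * counter into the matching host_port_stats field: roughly, it takes the
 * delta between the new hardware reading and the previously latched one
 * (via the "diff" regpair declared above) and adds that delta to the
 * 64-bit hi/lo accumulator - a sketch of its intent, not its exact
 * expansion.
 */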

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}
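/* Unlike the BigMAC path, the EMAC exposes 32-bit counters;
 * UPDATE_EXTEND_STAT extends each new 32-bit reading into the 64-bit
 * hi/lo pair in host_port_stats, carrying into the high word on
 * wrap-around (again a description of intent - the macro itself lives
 * in bnx2x.h).
 */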

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
				etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}
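/* host_port_stats_start/end appear to act as a torn-read guard: the end
 * marker is bumped first and the start marker is then set to match, so a
 * consumer that reads start != end knows it raced with an update in
 * progress.
 */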

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

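	/* bp->stats_counter is advanced each time the driver posts a
	 * statistics query; each storm presumably stamps the snapshot it
	 * writes back with the counter of the request it is answering,
	 * so a snapshot is current only when storm counter + 1 equals
	 * the driver's stats_counter.
	 */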
	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   "  tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   "  xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
				total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
				total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

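	/* bnx2x_hilo (a helper in this file) collapses a {hi, lo} pair of
	 * consecutive u32s into one value - on 64-bit kernels roughly
	 * ((u64)hi << 32) | lo, on 32-bit just the low word - so the
	 * net_device_stats fields below are built from the 64-bit driver
	 * accumulators.
	 */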
	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
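/* A flat, table-driven state machine: bnx2x_stats_handle() below indexes
 * bnx2x_stats_stm[current state][event] to pick both the handler to run
 * and the state to move to, e.g. an UPDATE event while ENABLED runs
 * bnx2x_stats_update() and stays in STATS_STATE_ENABLED.
 */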

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
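		/* e.g. (assuming a 15-bit sequence mask): drv_pulse 0x0003
		 * vs mcp_pulse 0x0003 or 0x0002 is healthy; after the
		 * sequence wraps, drv_pulse 0x0000 vs mcp_pulse 0x7fff is
		 * also healthy since (0x7fff + 1) & mask == 0x0000.
		 */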
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

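	/* rx_ticks/tx_ticks are in microseconds; the /12 below suggests the
	 * HC timeout registers count in 12-usec units (so 48 usec -> 4).
	 * A tick value of 0 disables coalescing for that index via the
	 * matching HC_DISABLE write.
	 */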
	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
			   BCM_RX_ETH_PAYLOAD_ALIGN;
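	/* e.g. for a 1500-byte MTU this is 1500 plus the rx offset, the
	 * Ethernet header/CRC overhead (ETH_OVREHEAD - the misspelling is
	 * the driver's own identifier) and the payload alignment pad; the
	 * exact constants are defined in bnx2x.h.
	 */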

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}
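		/* The last two entries of every SGE page are reserved: the
		 * one at RX_SGE_CNT * i - 2 is filled with the DMA address
		 * of the following page, and the (i % NUM_RX_SGE_PAGES)
		 * arithmetic links the final page back to the first, so the
		 * pages form a circular chain.
		 */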

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					  BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size =
						BCM_RX_ETH_PAYLOAD_ALIGN;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
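	/* Spread the RSS hash buckets round-robin across the active queues:
	 * with e.g. 4 queues the table reads 0,1,2,3,0,1,2,3,... for all
	 * TSTORM_INDIRECTION_TABLE_SIZE entries.
	 */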
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;
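		/* i.e. round the MTU up to whole SGE pages, then round the
		 * page count up to a whole number of SGEs: assuming 4K
		 * pages and PAGES_PER_SGE == 2, a 9000-byte MTU needs
		 * 3 pages, rounded up to 4, which is 2 SGEs.
		 */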

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
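	/* e.g. with 4K pages and PAGES_PER_SGE == 2 (assumed values),
	 * 8 * 4096 * 2 = 65536, which the (u32)0xffff cap trims to 65535 -
	 * the largest aggregation the 16-bit firmware field can express.
	 */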
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));
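	/* Per the gzip spec (RFC 1952): bytes 0-1 are the 0x1f 0x8b magic,
	 * byte 2 the compression method (8 = deflate), byte 3 the flags,
	 * and the fixed header is 10 bytes; if the FNAME flag (0x08) is
	 * set, a NUL-terminated file name follows and must be skipped
	 * before the raw deflate stream starts.
	 */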

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4925
4926/* some of the internal memories
4927 * are not directly readable from the driver
4928 * to test them we send debug packets
4929 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics first? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

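/* Note: writing 0 to a *_INT_MASK register below unmasks (enables)
 * every attention bit of that block; set bits stay masked, e.g. the
 * PBF mask of 0x18 at the end keeps bits 3-4 masked.
 */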
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3-4 masked */
}

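/* Note: HW init is staged - "common" runs once per chip, "port" once
 * per port and "func" once per PCI function; bnx2x_init_hw() below
 * selects how many of these stages to run from the MCP load response.
 */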
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix PXP client credit until the excel spec is updated */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

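/* Note on the PBF setup in bnx2x_init_port() below: thresholds and
 * credits are in 16-byte units, so 9040/16 covers one 9000-byte jumbo
 * frame plus overhead; the origin of the "+ 553 - 22" init-credit
 * adjustment is not documented here and is kept as in the original.
 */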
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;
#ifdef BCM_ISCSI
	/* Locals assumed by the iSCSI blocks below; they are not declared
	   in the original source, so this path would not have compiled.
	   The start value of i follows the "Port0 1 / Port1 385" comments. */
	u32 wb_write[2];
	int func = BP_FUNC(bp);
	int i = port * 384;
#endif

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a 1=valid bit
   added as the 53rd bit; since this is a wide register(TM) we split
   it into two 32-bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

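/* Worked example for the macros above: for a DMA address of
 * 0x1234567000, ONCHIP_ADDR1() yields 0x01234567 (address bits 12-43)
 * and ONCHIP_ADDR2() yields 0x00100000 (address bits 44-63 plus the
 * valid bit at position 20), together forming the valid+page entry
 * described in the comment.
 */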
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

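/* Note: the MCP mailbox is a simple sequence-number handshake - the
 * driver writes (command | seq) to drv_mb_header and polls
 * fw_mb_header until the firmware echoes the same sequence number;
 * only a reply carrying our sequence number is accepted below.
 */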
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}

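/* Note: bnx2x_free_mem()/bnx2x_alloc_mem() are a matched pair - the
 * allocator's error path calls the free routine, which is why each
 * free macro below checks for NULL and clears the pointer, so a
 * partially completed allocation can be unwound safely.
 */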
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE	/* was "#undef BNX2X_KFREE" - no such macro is defined here */
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fix up the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

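/* Note: MSI-X vector layout - entry 0 serves the slowpath/default
 * status block and entries 1..num_queues serve one fastpath queue
 * each, hence the msix_table[i + offset] indexing with offset = 1
 * used throughout.
 */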
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
				  i + offset, -rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	if (netif_running(bp->dev)) {
		bnx2x_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

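/* Note: the CAM takes each MAC address as three 16-bit words; the
 * swab16() calls in the two helpers below byte-swap each half-word of
 * dev_addr into the order the hardware expects.
 */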
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

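/* Note on the ramrod pattern used below: a command is posted with
 * bnx2x_sp_post(), its completion arrives as an event on an Rx queue,
 * bnx2x_sp_event() updates the tracked state field, and
 * bnx2x_wait_ramrod() above polls until that state is observed.
 */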
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

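/* Note: bnx2x_nic_load() proceeds in stages - negotiate a load_code
 * with the MCP (or emulate it via load_count when no MCP is present),
 * allocate memory and IRQs, init the HW, bring up the leading and
 * non-default queues via ramrods, then program the MAC and start the
 * fast path.
 */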
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Send LOAD_REQUEST command to MCP.
	   The MCP replies with the type of LOAD command:
	   if this is the first port to be initialized,
	   the common blocks should be initialized as well - otherwise not.
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			return -EBUSY;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
			return -EBUSY; /* other port in diagnostic mode */

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP  load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP  new load counts       %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* if we can't use MSI-X we only need one fp,
	 * so try to enable MSI-X with the requested number of fp's
	 * and fall back to INT#A with one fp
	 */
	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}
	DP(NETIF_MSG_IFUP,
	   "set number of queues to %d\n", bp->num_queues);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error;
		}
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed, aborting\n");
			goto load_error;
		}
	}

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_int_disable;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_rings_free;
		}
	}

	bnx2x_stats_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* Enable Rx interrupt handling before sending the ramrod
	   as it's completed on the Rx FP queue */
	bnx2x_napi_enable(bp);

	/* Enable interrupt handling */
	atomic_set(&bp->intr_sem, 0);

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_netif_stop;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_netif_stop;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should only be re-enabled */
		netif_wake_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_start_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		if (bp->flags & USING_MSIX_FLAG)
			printk(KERN_INFO PFX "%s: using MSI-X\n",
			       bp->dev->name);
		break;

	case LOAD_DIAG:
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_netif_stop:
	bnx2x_napi_disable(bp);
load_rings_free:
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_int_disable:
	bnx2x_int_disable_sync(bp, 1);
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error:
	bnx2x_free_mem(bp);
	bp->port.pmf = 0;

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

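/* Note: unload mirrors the staged load - the MCP reply (or the no-MCP
 * load_count bookkeeping in bnx2x_nic_unload() below) tells the last
 * function going down whether to reset only itself, the whole port,
 * or the common blocks too.
 */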
33471629 6654/* must be called with rtnl_lock */
34f80b04 6655static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6656{
da5a662a 6657 int port = BP_PORT(bp);
a2fbb9ea 6658 u32 reset_code = 0;
da5a662a 6659 int i, cnt, rc;
a2fbb9ea
ET
6660
6661 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6662
228241eb
ET
6663 bp->rx_mode = BNX2X_RX_MODE_NONE;
6664 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6665
f8ef6e44 6666 bnx2x_netif_stop(bp, 1);
65abd74d
YG
6667 if (!netif_running(bp->dev))
6668 bnx2x_napi_disable(bp);
34f80b04
EG
6669 del_timer_sync(&bp->timer);
6670 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6671 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6672 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6673
da5a662a 6674 /* Wait until tx fast path tasks complete */
228241eb
ET
6675 for_each_queue(bp, i) {
6676 struct bnx2x_fastpath *fp = &bp->fp[i];
6677
34f80b04
EG
6678 cnt = 1000;
6679 smp_rmb();
da5a662a
VZ
6680 while (BNX2X_HAS_TX_WORK(fp)) {
6681
65abd74d 6682 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6683 if (!cnt) {
6684 BNX2X_ERR("timeout waiting for queue[%d]\n",
6685 i);
6686#ifdef BNX2X_STOP_ON_ERROR
6687 bnx2x_panic();
6688 return -EBUSY;
6689#else
6690 break;
6691#endif
6692 }
6693 cnt--;
da5a662a 6694 msleep(1);
34f80b04
EG
6695 smp_rmb();
6696 }
228241eb 6697 }
da5a662a
VZ
6698 /* Give HW time to discard old tx messages */
6699 msleep(1);
a2fbb9ea 6700
34f80b04
EG
6701 /* Release IRQs */
6702 bnx2x_free_irq(bp);
6703
3101c2bc
YG
6704 if (CHIP_IS_E1(bp)) {
6705 struct mac_configuration_cmd *config =
6706 bnx2x_sp(bp, mcast_config);
6707
6708 bnx2x_set_mac_addr_e1(bp, 0);
6709
6710 for (i = 0; i < config->hdr.length_6b; i++)
6711 CAM_INVALIDATE(config->config_table[i]);
6712
6713 config->hdr.length_6b = i;
6714 if (CHIP_REV_IS_SLOW(bp))
6715 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6716 else
6717 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6718 config->hdr.client_id = BP_CL_ID(bp);
6719 config->hdr.reserved1 = 0;
6720
6721 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6722 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6723 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6724
6725 } else { /* E1H */
65abd74d
YG
6726 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6727
3101c2bc
YG
6728 bnx2x_set_mac_addr_e1h(bp, 0);
6729
6730 for (i = 0; i < MC_HASH_SIZE; i++)
6731 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6732 }
6733
65abd74d
YG
6734 if (unload_mode == UNLOAD_NORMAL)
6735 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6736
6737 else if (bp->flags & NO_WOL_FLAG) {
6738 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6739 if (CHIP_IS_E1H(bp))
6740 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6741
6742 } else if (bp->wol) {
6743 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6744 u8 *mac_addr = bp->dev->dev_addr;
6745 u32 val;
6746 /* The mac address is written to entries 1-4 to
6747 preserve entry 0 which is used by the PMF */
6748 u8 entry = (BP_E1HVN(bp) + 1)*8;
6749
6750 val = (mac_addr[0] << 8) | mac_addr[1];
6751 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6752
6753 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6754 (mac_addr[4] << 8) | mac_addr[5];
6755 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6756
6757 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6758
6759 } else
6760 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6761
34f80b04
EG
6762 /* Close multi and leading connections
6763 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6764 for_each_nondefault_queue(bp, i)
6765 if (bnx2x_stop_multi(bp, i))
228241eb 6766 goto unload_error;
a2fbb9ea 6767
da5a662a
VZ
6768 rc = bnx2x_stop_leading(bp);
6769 if (rc) {
34f80b04 6770 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6771#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6772 return -EBUSY;
da5a662a
VZ
6773#else
6774 goto unload_error;
34f80b04 6775#endif
228241eb
ET
6776 }
6777
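 /* reached on success as well: the chosen reset_code is reported to the
  * MCP (or accounted locally) either way */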
6778unload_error:
34f80b04 6779 if (!BP_NOMCP(bp))
228241eb 6780 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6781 else {
6782 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6783 load_count[0], load_count[1], load_count[2]);
6784 load_count[0]--;
da5a662a 6785 load_count[1 + port]--;
34f80b04
EG
6786 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6787 load_count[0], load_count[1], load_count[2]);
6788 if (load_count[0] == 0)
6789 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6790 else if (load_count[1 + port] == 0)
34f80b04
EG
6791 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6792 else
6793 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6794 }
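 /* Editor's note: without an MCP, load_count[] mirrors the firmware's
  * bookkeeping - index 0 counts all functions, index 1 + port this
  * port's. The last function out does the COMMON unload, the last one
  * on a port the PORT unload, anyone else only a FUNCTION unload. */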
a2fbb9ea 6795
34f80b04
EG
6796 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6797 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6798 bnx2x__link_reset(bp);
a2fbb9ea
ET
6799
6800 /* Reset the chip */
228241eb 6801 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6802
6803 /* Report UNLOAD_DONE to MCP */
34f80b04 6804 if (!BP_NOMCP(bp))
a2fbb9ea 6805 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6806 bp->port.pmf = 0;
a2fbb9ea 6807
7a9b2557 6808 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6809 bnx2x_free_skbs(bp);
7a9b2557 6810 for_each_queue(bp, i)
3196a88a 6811 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6812 bnx2x_free_mem(bp);
6813
6814 bp->state = BNX2X_STATE_CLOSED;
228241eb 6815
a2fbb9ea
ET
6816 netif_carrier_off(bp->dev);
6817
6818 return 0;
6819}
6820
34f80b04
EG
6821static void bnx2x_reset_task(struct work_struct *work)
6822{
6823 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6824
6825#ifdef BNX2X_STOP_ON_ERROR
6826 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6827 " so reset not done to allow debug dump,\n"
6828 KERN_ERR " you will need to reboot when done\n");
6829 return;
6830#endif
6831
6832 rtnl_lock();
6833
6834 if (!netif_running(bp->dev))
6835 goto reset_task_exit;
6836
6837 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6838 bnx2x_nic_load(bp, LOAD_NORMAL);
6839
6840reset_task_exit:
6841 rtnl_unlock();
6842}
6843
a2fbb9ea
ET
6844/* end of nic load/unload */
6845
6846/* ethtool_ops */
6847
6848/*
6849 * Init service functions
6850 */
6851
34f80b04
EG
6852static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6853{
6854 u32 val;
6855
6856 /* Check if there is any driver already loaded */
6857 val = REG_RD(bp, MISC_REG_UNPREPARED);
6858 if (val == 0x1) {
6859 /* Check if it is the UNDI driver
6860 * UNDI driver initializes CID offset for normal bell to 0x7
6861 */
4a37fb66 6862 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 6863 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
76b190c5
EG
6864 if (val == 0x7)
6865 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6866 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6867
34f80b04
EG
6868 if (val == 0x7) {
6869 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6870 /* save our func */
34f80b04 6871 int func = BP_FUNC(bp);
da5a662a
VZ
6872 u32 swap_en;
6873 u32 swap_val;
34f80b04
EG
6874
6875 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6876
6877 /* try unload UNDI on port 0 */
6878 bp->func = 0;
da5a662a
VZ
6879 bp->fw_seq =
6880 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6881 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6882 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6883
6884 /* if UNDI is loaded on the other port */
6885 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6886
da5a662a
VZ
6887 /* send "DONE" for previous unload */
6888 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6889
6890 /* unload UNDI on port 1 */
34f80b04 6891 bp->func = 1;
da5a662a
VZ
6892 bp->fw_seq =
6893 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6894 DRV_MSG_SEQ_NUMBER_MASK);
6895 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6896
6897 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6898 }
6899
da5a662a
VZ
6900 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6901 HC_REG_CONFIG_0), 0x1000);
6902
6903 /* close input traffic and wait for it */
6904 /* Do not rcv packets to BRB */
6905 REG_WR(bp,
6906 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6907 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6908 /* Do not direct rcv packets that are not for MCP to
6909 * the BRB */
6910 REG_WR(bp,
6911 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6912 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6913 /* clear AEU */
6914 REG_WR(bp,
6915 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6916 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6917 msleep(10);
6918
6919 /* save NIG port swap info */
6920 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6921 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6922 /* reset device */
6923 REG_WR(bp,
6924 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6925 0xd3ffffff);
34f80b04
EG
6926 REG_WR(bp,
6927 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6928 0x1403);
da5a662a
VZ
6929 /* take the NIG out of reset and restore swap values */
6930 REG_WR(bp,
6931 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6932 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6933 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6934 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6935
6936 /* send unload done to the MCP */
6937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6938
6939 /* restore our func and fw_seq */
6940 bp->func = func;
6941 bp->fw_seq =
6942 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6943 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
6944 }
6945 }
6946}
6947
6948static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6949{
6950 u32 val, val2, val3, val4, id;
72ce58c3 6951 u16 pmc;
34f80b04
EG
6952
6953 /* Get the chip revision id and number. */
6954 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6955 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6956 id = ((val & 0xffff) << 16);
6957 val = REG_RD(bp, MISC_REG_CHIP_REV);
6958 id |= ((val & 0xf) << 12);
6959 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6960 id |= ((val & 0xff) << 4);
6961 val = REG_RD(bp, MISC_REG_BOND_ID);
6962 id |= (val & 0xf);
6963 bp->common.chip_id = id;
6964 bp->link_params.chip_id = bp->common.chip_id;
6965 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6966
6967 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6968 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6969 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6970 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6971 bp->common.flash_size, bp->common.flash_size);
6972
6973 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6974 bp->link_params.shmem_base = bp->common.shmem_base;
6975 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6976
6977 if (!bp->common.shmem_base ||
6978 (bp->common.shmem_base < 0xA0000) ||
6979 (bp->common.shmem_base >= 0xC0000)) {
6980 BNX2X_DEV_INFO("MCP not active\n");
6981 bp->flags |= NO_MCP_FLAG;
6982 return;
6983 }
6984
6985 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6986 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6987 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6988 BNX2X_ERR("BAD MCP validity signature\n");
6989
6990 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6991 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6992
6993 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6994 bp->common.hw_config, bp->common.board);
6995
6996 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6997 SHARED_HW_CFG_LED_MODE_MASK) >>
6998 SHARED_HW_CFG_LED_MODE_SHIFT);
6999
7000 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7001 bp->common.bc_ver = val;
7002 BNX2X_DEV_INFO("bc_ver %X\n", val);
7003 if (val < BNX2X_BC_VER) {
7004 /* for now only warn
7005 * later we might need to enforce this */
7006 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7007 " please upgrade BC\n", BNX2X_BC_VER, val);
7008 }
72ce58c3
EG
7009
7010 if (BP_E1HVN(bp) == 0) {
7011 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7012 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7013 } else {
7014 /* no WOL capability for E1HVN != 0 */
7015 bp->flags |= NO_WOL_FLAG;
7016 }
7017 BNX2X_DEV_INFO("%sWoL capable\n",
7018 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7019
7020 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7021 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7022 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7023 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7024
7025 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7026 val, val2, val3, val4);
7027}
7028
7029static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7030 u32 switch_cfg)
a2fbb9ea 7031{
34f80b04 7032 int port = BP_PORT(bp);
a2fbb9ea
ET
7033 u32 ext_phy_type;
7034
a2fbb9ea
ET
7035 switch (switch_cfg) {
7036 case SWITCH_CFG_1G:
7037 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7038
c18487ee
YR
7039 ext_phy_type =
7040 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7041 switch (ext_phy_type) {
7042 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7043 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7044 ext_phy_type);
7045
34f80b04
EG
7046 bp->port.supported |= (SUPPORTED_10baseT_Half |
7047 SUPPORTED_10baseT_Full |
7048 SUPPORTED_100baseT_Half |
7049 SUPPORTED_100baseT_Full |
7050 SUPPORTED_1000baseT_Full |
7051 SUPPORTED_2500baseX_Full |
7052 SUPPORTED_TP |
7053 SUPPORTED_FIBRE |
7054 SUPPORTED_Autoneg |
7055 SUPPORTED_Pause |
7056 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7057 break;
7058
7059 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7060 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7061 ext_phy_type);
7062
34f80b04
EG
7063 bp->port.supported |= (SUPPORTED_10baseT_Half |
7064 SUPPORTED_10baseT_Full |
7065 SUPPORTED_100baseT_Half |
7066 SUPPORTED_100baseT_Full |
7067 SUPPORTED_1000baseT_Full |
7068 SUPPORTED_TP |
7069 SUPPORTED_FIBRE |
7070 SUPPORTED_Autoneg |
7071 SUPPORTED_Pause |
7072 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7073 break;
7074
7075 default:
7076 BNX2X_ERR("NVRAM config error. "
7077 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7078 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7079 return;
7080 }
7081
34f80b04
EG
7082 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7083 port*0x10);
7084 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7085 break;
7086
7087 case SWITCH_CFG_10G:
7088 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7089
c18487ee
YR
7090 ext_phy_type =
7091 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7092 switch (ext_phy_type) {
7093 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7094 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7095 ext_phy_type);
7096
34f80b04
EG
7097 bp->port.supported |= (SUPPORTED_10baseT_Half |
7098 SUPPORTED_10baseT_Full |
7099 SUPPORTED_100baseT_Half |
7100 SUPPORTED_100baseT_Full |
7101 SUPPORTED_1000baseT_Full |
7102 SUPPORTED_2500baseX_Full |
7103 SUPPORTED_10000baseT_Full |
7104 SUPPORTED_TP |
7105 SUPPORTED_FIBRE |
7106 SUPPORTED_Autoneg |
7107 SUPPORTED_Pause |
7108 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7109 break;
7110
7111 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7112 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7113 ext_phy_type);
f1410647 7114
34f80b04
EG
7115 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7116 SUPPORTED_FIBRE |
7117 SUPPORTED_Pause |
7118 SUPPORTED_Asym_Pause);
f1410647
ET
7119 break;
7120
a2fbb9ea 7121 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7122 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7123 ext_phy_type);
7124
34f80b04
EG
7125 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7126 SUPPORTED_1000baseT_Full |
7127 SUPPORTED_FIBRE |
7128 SUPPORTED_Pause |
7129 SUPPORTED_Asym_Pause);
f1410647
ET
7130 break;
7131
7132 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7133 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7134 ext_phy_type);
7135
34f80b04
EG
7136 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7137 SUPPORTED_1000baseT_Full |
7138 SUPPORTED_FIBRE |
7139 SUPPORTED_Autoneg |
7140 SUPPORTED_Pause |
7141 SUPPORTED_Asym_Pause);
f1410647
ET
7142 break;
7143
c18487ee
YR
7144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7145 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7146 ext_phy_type);
7147
34f80b04
EG
7148 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7149 SUPPORTED_2500baseX_Full |
7150 SUPPORTED_1000baseT_Full |
7151 SUPPORTED_FIBRE |
7152 SUPPORTED_Autoneg |
7153 SUPPORTED_Pause |
7154 SUPPORTED_Asym_Pause);
c18487ee
YR
7155 break;
7156
f1410647
ET
7157 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7158 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7159 ext_phy_type);
7160
34f80b04
EG
7161 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7162 SUPPORTED_TP |
7163 SUPPORTED_Autoneg |
7164 SUPPORTED_Pause |
7165 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7166 break;
7167
c18487ee
YR
7168 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7169 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7170 bp->link_params.ext_phy_config);
7171 break;
7172
a2fbb9ea
ET
7173 default:
7174 BNX2X_ERR("NVRAM config error. "
7175 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7176 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7177 return;
7178 }
7179
34f80b04
EG
7180 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7181 port*0x18);
7182 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7183
a2fbb9ea
ET
7184 break;
7185
7186 default:
7187 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7188 bp->port.link_config);
a2fbb9ea
ET
7189 return;
7190 }
34f80b04 7191 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7192
7193 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7194 if (!(bp->link_params.speed_cap_mask &
7195 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7196 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7197
c18487ee
YR
7198 if (!(bp->link_params.speed_cap_mask &
7199 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7200 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7201
c18487ee
YR
7202 if (!(bp->link_params.speed_cap_mask &
7203 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7204 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7205
c18487ee
YR
7206 if (!(bp->link_params.speed_cap_mask &
7207 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7208 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7209
c18487ee
YR
7210 if (!(bp->link_params.speed_cap_mask &
7211 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7212 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7213 SUPPORTED_1000baseT_Full);
a2fbb9ea 7214
c18487ee
YR
7215 if (!(bp->link_params.speed_cap_mask &
7216 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7217 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7218
c18487ee
YR
7219 if (!(bp->link_params.speed_cap_mask &
7220 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7221 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7222
34f80b04 7223 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7224}
7225
34f80b04 7226static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7227{
c18487ee 7228 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7229
34f80b04 7230 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7231 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7232 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7233 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7234 bp->port.advertising = bp->port.supported;
a2fbb9ea 7235 } else {
c18487ee
YR
7236 u32 ext_phy_type =
7237 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7238
7239 if ((ext_phy_type ==
7240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7241 (ext_phy_type ==
7242 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7243 /* force 10G, no AN */
c18487ee 7244 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7245 bp->port.advertising =
a2fbb9ea
ET
7246 (ADVERTISED_10000baseT_Full |
7247 ADVERTISED_FIBRE);
7248 break;
7249 }
7250 BNX2X_ERR("NVRAM config error. "
7251 "Invalid link_config 0x%x"
7252 " Autoneg not supported\n",
34f80b04 7253 bp->port.link_config);
a2fbb9ea
ET
7254 return;
7255 }
7256 break;
7257
7258 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7259 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7260 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7261 bp->port.advertising = (ADVERTISED_10baseT_Full |
7262 ADVERTISED_TP);
a2fbb9ea
ET
7263 } else {
7264 BNX2X_ERR("NVRAM config error. "
7265 "Invalid link_config 0x%x"
7266 " speed_cap_mask 0x%x\n",
34f80b04 7267 bp->port.link_config,
c18487ee 7268 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7269 return;
7270 }
7271 break;
7272
7273 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7274 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7275 bp->link_params.req_line_speed = SPEED_10;
7276 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7277 bp->port.advertising = (ADVERTISED_10baseT_Half |
7278 ADVERTISED_TP);
a2fbb9ea
ET
7279 } else {
7280 BNX2X_ERR("NVRAM config error. "
7281 "Invalid link_config 0x%x"
7282 " speed_cap_mask 0x%x\n",
34f80b04 7283 bp->port.link_config,
c18487ee 7284 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7285 return;
7286 }
7287 break;
7288
7289 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7290 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7291 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7292 bp->port.advertising = (ADVERTISED_100baseT_Full |
7293 ADVERTISED_TP);
a2fbb9ea
ET
7294 } else {
7295 BNX2X_ERR("NVRAM config error. "
7296 "Invalid link_config 0x%x"
7297 " speed_cap_mask 0x%x\n",
34f80b04 7298 bp->port.link_config,
c18487ee 7299 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7300 return;
7301 }
7302 break;
7303
7304 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7305 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7306 bp->link_params.req_line_speed = SPEED_100;
7307 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7308 bp->port.advertising = (ADVERTISED_100baseT_Half |
7309 ADVERTISED_TP);
a2fbb9ea
ET
7310 } else {
7311 BNX2X_ERR("NVRAM config error. "
7312 "Invalid link_config 0x%x"
7313 " speed_cap_mask 0x%x\n",
34f80b04 7314 bp->port.link_config,
c18487ee 7315 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7316 return;
7317 }
7318 break;
7319
7320 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7321 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7322 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7323 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7324 ADVERTISED_TP);
a2fbb9ea
ET
7325 } else {
7326 BNX2X_ERR("NVRAM config error. "
7327 "Invalid link_config 0x%x"
7328 " speed_cap_mask 0x%x\n",
34f80b04 7329 bp->port.link_config,
c18487ee 7330 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7331 return;
7332 }
7333 break;
7334
7335 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7336 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7337 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7338 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7339 ADVERTISED_TP);
a2fbb9ea
ET
7340 } else {
7341 BNX2X_ERR("NVRAM config error. "
7342 "Invalid link_config 0x%x"
7343 " speed_cap_mask 0x%x\n",
34f80b04 7344 bp->port.link_config,
c18487ee 7345 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7346 return;
7347 }
7348 break;
7349
7350 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7351 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7352 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7353 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7354 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7355 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7356 ADVERTISED_FIBRE);
a2fbb9ea
ET
7357 } else {
7358 BNX2X_ERR("NVRAM config error. "
7359 "Invalid link_config 0x%x"
7360 " speed_cap_mask 0x%x\n",
34f80b04 7361 bp->port.link_config,
c18487ee 7362 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7363 return;
7364 }
7365 break;
7366
7367 default:
7368 BNX2X_ERR("NVRAM config error. "
7369 "BAD link speed link_config 0x%x\n",
34f80b04 7370 bp->port.link_config);
c18487ee 7371 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7372 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7373 break;
7374 }
a2fbb9ea 7375
34f80b04
EG
7376 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7377 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7378 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7379 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7380 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
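 /* Editor's note: PORT_FEATURE_FLOW_CONTROL_MASK above keeps unrelated
  * link_config bits out of req_flow_ctrl; an AUTO request is then only
  * left standing when the port can actually autonegotiate, otherwise it
  * falls back to NONE. */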
a2fbb9ea 7381
c18487ee 7382 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7383 " advertising 0x%x\n",
c18487ee
YR
7384 bp->link_params.req_line_speed,
7385 bp->link_params.req_duplex,
34f80b04 7386 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7387}
7388
34f80b04 7389static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7390{
34f80b04
EG
7391 int port = BP_PORT(bp);
7392 u32 val, val2;
a2fbb9ea 7393
c18487ee 7394 bp->link_params.bp = bp;
34f80b04 7395 bp->link_params.port = port;
c18487ee 7396
c18487ee 7397 bp->link_params.serdes_config =
f1410647 7398 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7399 bp->link_params.lane_config =
a2fbb9ea 7400 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7401 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7402 SHMEM_RD(bp,
7403 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7404 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7405 SHMEM_RD(bp,
7406 dev_info.port_hw_config[port].speed_capability_mask);
7407
34f80b04 7408 bp->port.link_config =
a2fbb9ea
ET
7409 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7410
34f80b04
EG
7411 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7412 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7413 " link_config 0x%08x\n",
c18487ee
YR
7414 bp->link_params.serdes_config,
7415 bp->link_params.lane_config,
7416 bp->link_params.ext_phy_config,
34f80b04 7417 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7418
34f80b04 7419 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7420 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7421 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7422
7423 bnx2x_link_settings_requested(bp);
7424
7425 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7426 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7427 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7428 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7429 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7430 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7431 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7432 bp->dev->dev_addr[5] = (u8)(val & 0xff);
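 /* Editor's note: the station address sits big-endian in two shmem
  * words - the two high octets in the low half of mac_upper, the
  * remaining four in mac_lower - and is unpacked most-significant
  * byte first above. */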
c18487ee
YR
7433 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7434 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7435}
7436
7437static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7438{
7439 int func = BP_FUNC(bp);
7440 u32 val, val2;
7441 int rc = 0;
a2fbb9ea 7442
34f80b04 7443 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7444
34f80b04
EG
7445 bp->e1hov = 0;
7446 bp->e1hmf = 0;
7447 if (CHIP_IS_E1H(bp)) {
7448 bp->mf_config =
7449 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7450
3196a88a
EG
7451 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7452 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7453 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7454
34f80b04
EG
7455 bp->e1hov = val;
7456 bp->e1hmf = 1;
7457 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7458 "(0x%04x)\n",
7459 func, bp->e1hov, bp->e1hov);
7460 } else {
7461 BNX2X_DEV_INFO("Single function mode\n");
7462 if (BP_E1HVN(bp)) {
7463 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7464 " aborting\n", func);
7465 rc = -EPERM;
7466 }
7467 }
7468 }
a2fbb9ea 7469
34f80b04
EG
7470 if (!BP_NOMCP(bp)) {
7471 bnx2x_get_port_hwinfo(bp);
7472
7473 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7474 DRV_MSG_SEQ_NUMBER_MASK);
7475 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7476 }
7477
7478 if (IS_E1HMF(bp)) {
7479 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7480 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7481 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7482 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7483 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7484 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7485 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7486 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7487 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7488 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7489 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7490 ETH_ALEN);
7491 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7492 ETH_ALEN);
a2fbb9ea 7493 }
34f80b04
EG
7494
7495 return rc;
a2fbb9ea
ET
7496 }
7497
34f80b04
EG
7498 if (BP_NOMCP(bp)) {
7499 /* only supposed to happen on emulation/FPGA */
33471629 7500 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7501 random_ether_addr(bp->dev->dev_addr);
7502 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7503 }
a2fbb9ea 7504
34f80b04
EG
7505 return rc;
7506}
7507
7508static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7509{
7510 int func = BP_FUNC(bp);
7511 int rc;
7512
da5a662a
VZ
7513 /* Disable interrupt handling until HW is initialized */
7514 atomic_set(&bp->intr_sem, 1);
7515
34f80b04 7516 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7517
1cf167f2 7518 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7519 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7520
7521 rc = bnx2x_get_hwinfo(bp);
7522
7523 /* need to reset chip if undi was active */
7524 if (!BP_NOMCP(bp))
7525 bnx2x_undi_unload(bp);
7526
7527 if (CHIP_REV_IS_FPGA(bp))
7528 printk(KERN_ERR PFX "FPGA detected\n");
7529
7530 if (BP_NOMCP(bp) && (func == 0))
7531 printk(KERN_ERR PFX
7532 "MCP disabled, must load devices in order!\n");
7533
7a9b2557
VZ
7534 /* Set TPA flags */
7535 if (disable_tpa) {
7536 bp->flags &= ~TPA_ENABLE_FLAG;
7537 bp->dev->features &= ~NETIF_F_LRO;
7538 } else {
7539 bp->flags |= TPA_ENABLE_FLAG;
7540 bp->dev->features |= NETIF_F_LRO;
7541 }
7542
7543
34f80b04
EG
7544 bp->tx_ring_size = MAX_TX_AVAIL;
7545 bp->rx_ring_size = MAX_RX_AVAIL;
7546
7547 bp->rx_csum = 1;
7548 bp->rx_offset = 0;
7549
7550 bp->tx_ticks = 50;
7551 bp->rx_ticks = 25;
7552
34f80b04
EG
7553 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7554 bp->current_interval = (poll ? poll : bp->timer_interval);
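 /* Editor's note: slow (emulation/FPGA) chips are sampled every
  * 5 seconds instead of every second; a non-zero "poll" module
  * parameter overrides the interval outright. */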
7555
7556 init_timer(&bp->timer);
7557 bp->timer.expires = jiffies + bp->current_interval;
7558 bp->timer.data = (unsigned long) bp;
7559 bp->timer.function = bnx2x_timer;
7560
7561 return rc;
a2fbb9ea
ET
7562}
7563
7564/*
7565 * ethtool service functions
7566 */
7567
7568/* All ethtool functions called with rtnl_lock */
7569
7570static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7571{
7572 struct bnx2x *bp = netdev_priv(dev);
7573
34f80b04
EG
7574 cmd->supported = bp->port.supported;
7575 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7576
7577 if (netif_carrier_ok(dev)) {
c18487ee
YR
7578 cmd->speed = bp->link_vars.line_speed;
7579 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7580 } else {
c18487ee
YR
7581 cmd->speed = bp->link_params.req_line_speed;
7582 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7583 }
34f80b04
EG
7584 if (IS_E1HMF(bp)) {
7585 u16 vn_max_rate;
7586
7587 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7588 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7589 if (vn_max_rate < cmd->speed)
7590 cmd->speed = vn_max_rate;
7591 }
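 /* Editor's note: in E1HMF mode the reported speed is capped by the
  * per-VN maximum bandwidth; FUNC_MF_CFG_MAX_BW is evidently kept in
  * units of 100 Mbps, hence the multiplication above. */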
a2fbb9ea 7592
c18487ee
YR
7593 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7594 u32 ext_phy_type =
7595 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7596
7597 switch (ext_phy_type) {
7598 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7600 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7603 cmd->port = PORT_FIBRE;
7604 break;
7605
7606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7607 cmd->port = PORT_TP;
7608 break;
7609
c18487ee
YR
7610 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7611 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7612 bp->link_params.ext_phy_config);
7613 break;
7614
f1410647
ET
7615 default:
7616 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7617 bp->link_params.ext_phy_config);
7618 break;
f1410647
ET
7619 }
7620 } else
a2fbb9ea 7621 cmd->port = PORT_TP;
a2fbb9ea 7622
34f80b04 7623 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7624 cmd->transceiver = XCVR_INTERNAL;
7625
c18487ee 7626 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7627 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7628 else
a2fbb9ea 7629 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7630
7631 cmd->maxtxpkt = 0;
7632 cmd->maxrxpkt = 0;
7633
7634 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7635 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7636 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7637 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7638 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7639 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7640 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7641
7642 return 0;
7643}
7644
7645static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7646{
7647 struct bnx2x *bp = netdev_priv(dev);
7648 u32 advertising;
7649
34f80b04
EG
7650 if (IS_E1HMF(bp))
7651 return 0;
7652
a2fbb9ea
ET
7653 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7654 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7655 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7656 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7657 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7658 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7659 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7660
a2fbb9ea 7661 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7662 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7663 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7664 return -EINVAL;
f1410647 7665 }
a2fbb9ea
ET
7666
7667 /* advertise the requested speed and duplex if supported */
34f80b04 7668 cmd->advertising &= bp->port.supported;
a2fbb9ea 7669
c18487ee
YR
7670 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7671 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7672 bp->port.advertising |= (ADVERTISED_Autoneg |
7673 cmd->advertising);
a2fbb9ea
ET
7674
7675 } else { /* forced speed */
7676 /* advertise the requested speed and duplex if supported */
7677 switch (cmd->speed) {
7678 case SPEED_10:
7679 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7680 if (!(bp->port.supported &
f1410647
ET
7681 SUPPORTED_10baseT_Full)) {
7682 DP(NETIF_MSG_LINK,
7683 "10M full not supported\n");
a2fbb9ea 7684 return -EINVAL;
f1410647 7685 }
a2fbb9ea
ET
7686
7687 advertising = (ADVERTISED_10baseT_Full |
7688 ADVERTISED_TP);
7689 } else {
34f80b04 7690 if (!(bp->port.supported &
f1410647
ET
7691 SUPPORTED_10baseT_Half)) {
7692 DP(NETIF_MSG_LINK,
7693 "10M half not supported\n");
a2fbb9ea 7694 return -EINVAL;
f1410647 7695 }
a2fbb9ea
ET
7696
7697 advertising = (ADVERTISED_10baseT_Half |
7698 ADVERTISED_TP);
7699 }
7700 break;
7701
7702 case SPEED_100:
7703 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7704 if (!(bp->port.supported &
f1410647
ET
7705 SUPPORTED_100baseT_Full)) {
7706 DP(NETIF_MSG_LINK,
7707 "100M full not supported\n");
a2fbb9ea 7708 return -EINVAL;
f1410647 7709 }
a2fbb9ea
ET
7710
7711 advertising = (ADVERTISED_100baseT_Full |
7712 ADVERTISED_TP);
7713 } else {
34f80b04 7714 if (!(bp->port.supported &
f1410647
ET
7715 SUPPORTED_100baseT_Half)) {
7716 DP(NETIF_MSG_LINK,
7717 "100M half not supported\n");
a2fbb9ea 7718 return -EINVAL;
f1410647 7719 }
a2fbb9ea
ET
7720
7721 advertising = (ADVERTISED_100baseT_Half |
7722 ADVERTISED_TP);
7723 }
7724 break;
7725
7726 case SPEED_1000:
f1410647
ET
7727 if (cmd->duplex != DUPLEX_FULL) {
7728 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7729 return -EINVAL;
f1410647 7730 }
a2fbb9ea 7731
34f80b04 7732 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7733 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7734 return -EINVAL;
f1410647 7735 }
a2fbb9ea
ET
7736
7737 advertising = (ADVERTISED_1000baseT_Full |
7738 ADVERTISED_TP);
7739 break;
7740
7741 case SPEED_2500:
f1410647
ET
7742 if (cmd->duplex != DUPLEX_FULL) {
7743 DP(NETIF_MSG_LINK,
7744 "2.5G half not supported\n");
a2fbb9ea 7745 return -EINVAL;
f1410647 7746 }
a2fbb9ea 7747
34f80b04 7748 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7749 DP(NETIF_MSG_LINK,
7750 "2.5G full not supported\n");
a2fbb9ea 7751 return -EINVAL;
f1410647 7752 }
a2fbb9ea 7753
f1410647 7754 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7755 ADVERTISED_TP);
7756 break;
7757
7758 case SPEED_10000:
f1410647
ET
7759 if (cmd->duplex != DUPLEX_FULL) {
7760 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7761 return -EINVAL;
f1410647 7762 }
a2fbb9ea 7763
34f80b04 7764 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7765 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7766 return -EINVAL;
f1410647 7767 }
a2fbb9ea
ET
7768
7769 advertising = (ADVERTISED_10000baseT_Full |
7770 ADVERTISED_FIBRE);
7771 break;
7772
7773 default:
f1410647 7774 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7775 return -EINVAL;
7776 }
7777
c18487ee
YR
7778 bp->link_params.req_line_speed = cmd->speed;
7779 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7780 bp->port.advertising = advertising;
a2fbb9ea
ET
7781 }
7782
c18487ee 7783 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7784 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7785 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7786 bp->port.advertising);
a2fbb9ea 7787
34f80b04 7788 if (netif_running(dev)) {
bb2a0f7a 7789 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7790 bnx2x_link_set(bp);
7791 }
a2fbb9ea
ET
7792
7793 return 0;
7794}
7795
c18487ee
YR
7796#define PHY_FW_VER_LEN 10
7797
a2fbb9ea
ET
7798static void bnx2x_get_drvinfo(struct net_device *dev,
7799 struct ethtool_drvinfo *info)
7800{
7801 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7802 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7803
7804 strcpy(info->driver, DRV_MODULE_NAME);
7805 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7806
7807 phy_fw_ver[0] = '\0';
34f80b04 7808 if (bp->port.pmf) {
4a37fb66 7809 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7810 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7811 (bp->state != BNX2X_STATE_CLOSED),
7812 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7813 bnx2x_release_phy_lock(bp);
34f80b04 7814 }
c18487ee 7815
f0e53a84
EG
7816 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7817 (bp->common.bc_ver & 0xff0000) >> 16,
7818 (bp->common.bc_ver & 0xff00) >> 8,
7819 (bp->common.bc_ver & 0xff),
7820 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7821 strcpy(info->bus_info, pci_name(bp->pdev));
7822 info->n_stats = BNX2X_NUM_STATS;
7823 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7824 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7825 info->regdump_len = 0;
7826}
7827
7828static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7829{
7830 struct bnx2x *bp = netdev_priv(dev);
7831
7832 if (bp->flags & NO_WOL_FLAG) {
7833 wol->supported = 0;
7834 wol->wolopts = 0;
7835 } else {
7836 wol->supported = WAKE_MAGIC;
7837 if (bp->wol)
7838 wol->wolopts = WAKE_MAGIC;
7839 else
7840 wol->wolopts = 0;
7841 }
7842 memset(&wol->sopass, 0, sizeof(wol->sopass));
7843}
7844
7845static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7846{
7847 struct bnx2x *bp = netdev_priv(dev);
7848
7849 if (wol->wolopts & ~WAKE_MAGIC)
7850 return -EINVAL;
7851
7852 if (wol->wolopts & WAKE_MAGIC) {
7853 if (bp->flags & NO_WOL_FLAG)
7854 return -EINVAL;
7855
7856 bp->wol = 1;
34f80b04 7857 } else
a2fbb9ea 7858 bp->wol = 0;
34f80b04 7859
a2fbb9ea
ET
7860 return 0;
7861}
7862
7863static u32 bnx2x_get_msglevel(struct net_device *dev)
7864{
7865 struct bnx2x *bp = netdev_priv(dev);
7866
7867 return bp->msglevel;
7868}
7869
7870static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7871{
7872 struct bnx2x *bp = netdev_priv(dev);
7873
7874 if (capable(CAP_NET_ADMIN))
7875 bp->msglevel = level;
7876}
7877
7878static int bnx2x_nway_reset(struct net_device *dev)
7879{
7880 struct bnx2x *bp = netdev_priv(dev);
7881
34f80b04
EG
7882 if (!bp->port.pmf)
7883 return 0;
a2fbb9ea 7884
34f80b04 7885 if (netif_running(dev)) {
bb2a0f7a 7886 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7887 bnx2x_link_set(bp);
7888 }
a2fbb9ea
ET
7889
7890 return 0;
7891}
7892
7893static int bnx2x_get_eeprom_len(struct net_device *dev)
7894{
7895 struct bnx2x *bp = netdev_priv(dev);
7896
34f80b04 7897 return bp->common.flash_size;
a2fbb9ea
ET
7898}
7899
7900static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7901{
34f80b04 7902 int port = BP_PORT(bp);
a2fbb9ea
ET
7903 int count, i;
7904 u32 val = 0;
7905
7906 /* adjust timeout for emulation/FPGA */
7907 count = NVRAM_TIMEOUT_COUNT;
7908 if (CHIP_REV_IS_SLOW(bp))
7909 count *= 100;
7910
7911 /* request access to nvram interface */
7912 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7913 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7914
7915 for (i = 0; i < count*10; i++) {
7916 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7917 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7918 break;
7919
7920 udelay(5);
7921 }
7922
7923 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7924 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7925 return -EBUSY;
7926 }
7927
7928 return 0;
7929}
7930
7931static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7932{
34f80b04 7933 int port = BP_PORT(bp);
a2fbb9ea
ET
7934 int count, i;
7935 u32 val = 0;
7936
7937 /* adjust timeout for emulation/FPGA */
7938 count = NVRAM_TIMEOUT_COUNT;
7939 if (CHIP_REV_IS_SLOW(bp))
7940 count *= 100;
7941
7942 /* relinquish nvram interface */
7943 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7944 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7945
7946 for (i = 0; i < count*10; i++) {
7947 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7948 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7949 break;
7950
7951 udelay(5);
7952 }
7953
7954 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7955 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7956 return -EBUSY;
7957 }
7958
7959 return 0;
7960}
7961
7962static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7963{
7964 u32 val;
7965
7966 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7967
7968 /* enable both bits, even on read */
7969 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7970 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7971 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7972}
7973
7974static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7975{
7976 u32 val;
7977
7978 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7979
7980 /* disable both bits, even after read */
7981 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7982 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7983 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7984}
7985
7986static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7987 u32 cmd_flags)
7988{
f1410647 7989 int count, i, rc;
a2fbb9ea
ET
7990 u32 val;
7991
7992 /* build the command word */
7993 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7994
7995 /* need to clear DONE bit separately */
7996 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7997
7998 /* address of the NVRAM to read from */
7999 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8000 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8001
8002 /* issue a read command */
8003 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8004
8005 /* adjust timeout for emulation/FPGA */
8006 count = NVRAM_TIMEOUT_COUNT;
8007 if (CHIP_REV_IS_SLOW(bp))
8008 count *= 100;
8009
8010 /* wait for completion */
8011 *ret_val = 0;
8012 rc = -EBUSY;
8013 for (i = 0; i < count; i++) {
8014 udelay(5);
8015 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8016
8017 if (val & MCPR_NVM_COMMAND_DONE) {
8018 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8019 /* we read nvram data in cpu order
8020 * but ethtool sees it as an array of bytes
8021 * converting to big-endian will do the work */
8022 val = cpu_to_be32(val);
8023 *ret_val = val;
8024 rc = 0;
8025 break;
8026 }
8027 }
8028
8029 return rc;
8030}
8031
8032static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8033 int buf_size)
8034{
8035 int rc;
8036 u32 cmd_flags;
8037 u32 val;
8038
8039 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8040 DP(BNX2X_MSG_NVM,
c14423fe 8041 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8042 offset, buf_size);
8043 return -EINVAL;
8044 }
8045
34f80b04
EG
8046 if (offset + buf_size > bp->common.flash_size) {
8047 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8048 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8049 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8050 return -EINVAL;
8051 }
8052
8053 /* request access to nvram interface */
8054 rc = bnx2x_acquire_nvram_lock(bp);
8055 if (rc)
8056 return rc;
8057
8058 /* enable access to nvram interface */
8059 bnx2x_enable_nvram_access(bp);
8060
8061 /* read the first word(s) */
8062 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8063 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8064 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8065 memcpy(ret_buf, &val, 4);
8066
8067 /* advance to the next dword */
8068 offset += sizeof(u32);
8069 ret_buf += sizeof(u32);
8070 buf_size -= sizeof(u32);
8071 cmd_flags = 0;
8072 }
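 /* Editor's note: dwords are framed for the NVM controller - the
  * opening read above was tagged MCPR_NVM_COMMAND_FIRST, intermediate
  * ones carry no flag, and the final dword below is tagged
  * MCPR_NVM_COMMAND_LAST to close the transaction. */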
8073
8074 if (rc == 0) {
8075 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8076 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8077 memcpy(ret_buf, &val, 4);
8078 }
8079
8080 /* disable access to nvram interface */
8081 bnx2x_disable_nvram_access(bp);
8082 bnx2x_release_nvram_lock(bp);
8083
8084 return rc;
8085}
8086
8087static int bnx2x_get_eeprom(struct net_device *dev,
8088 struct ethtool_eeprom *eeprom, u8 *eebuf)
8089{
8090 struct bnx2x *bp = netdev_priv(dev);
8091 int rc;
8092
34f80b04 8093 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8094 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8095 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8096 eeprom->len, eeprom->len);
8097
8098 /* parameters already validated in ethtool_get_eeprom */
8099
8100 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8101
8102 return rc;
8103}
8104
8105static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8106 u32 cmd_flags)
8107{
f1410647 8108 int count, i, rc;
a2fbb9ea
ET
8109
8110 /* build the command word */
8111 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8112
8113 /* need to clear DONE bit separately */
8114 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8115
8116 /* write the data */
8117 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8118
8119 /* address of the NVRAM to write to */
8120 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8121 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8122
8123 /* issue the write command */
8124 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8125
8126 /* adjust timeout for emulation/FPGA */
8127 count = NVRAM_TIMEOUT_COUNT;
8128 if (CHIP_REV_IS_SLOW(bp))
8129 count *= 100;
8130
8131 /* wait for completion */
8132 rc = -EBUSY;
8133 for (i = 0; i < count; i++) {
8134 udelay(5);
8135 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8136 if (val & MCPR_NVM_COMMAND_DONE) {
8137 rc = 0;
8138 break;
8139 }
8140 }
8141
8142 return rc;
8143}
8144
f1410647 8145#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
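/* bit offset of a byte within its dword, e.g. BYTE_OFFSET(6) == 16 */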
a2fbb9ea
ET
8146
8147static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8148 int buf_size)
8149{
8150 int rc;
8151 u32 cmd_flags;
8152 u32 align_offset;
8153 u32 val;
8154
34f80b04
EG
8155 if (offset + buf_size > bp->common.flash_size) {
8156 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8157 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8158 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8159 return -EINVAL;
8160 }
8161
8162 /* request access to nvram interface */
8163 rc = bnx2x_acquire_nvram_lock(bp);
8164 if (rc)
8165 return rc;
8166
8167 /* enable access to nvram interface */
8168 bnx2x_enable_nvram_access(bp);
8169
8170 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8171 align_offset = (offset & ~0x03);
8172 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8173
8174 if (rc == 0) {
8175 val &= ~(0xff << BYTE_OFFSET(offset));
8176 val |= (*data_buf << BYTE_OFFSET(offset));
8177
8178 /* nvram data is returned as an array of bytes
8179 * convert it back to cpu order */
8180 val = be32_to_cpu(val);
8181
a2fbb9ea
ET
8182 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8183 cmd_flags);
8184 }
8185
8186 /* disable access to nvram interface */
8187 bnx2x_disable_nvram_access(bp);
8188 bnx2x_release_nvram_lock(bp);
8189
8190 return rc;
8191}
8192
8193static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8194 int buf_size)
8195{
8196 int rc;
8197 u32 cmd_flags;
8198 u32 val;
8199 u32 written_so_far;
8200
34f80b04 8201 if (buf_size == 1) /* ethtool */
a2fbb9ea 8202 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8203
8204 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8205 DP(BNX2X_MSG_NVM,
c14423fe 8206 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8207 offset, buf_size);
8208 return -EINVAL;
8209 }
8210
34f80b04
EG
8211 if (offset + buf_size > bp->common.flash_size) {
8212 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8213 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8214 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8215 return -EINVAL;
8216 }
8217
8218 /* request access to nvram interface */
8219 rc = bnx2x_acquire_nvram_lock(bp);
8220 if (rc)
8221 return rc;
8222
8223 /* enable access to nvram interface */
8224 bnx2x_enable_nvram_access(bp);
8225
8226 written_so_far = 0;
8227 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8228 while ((written_so_far < buf_size) && (rc == 0)) {
8229 if (written_so_far == (buf_size - sizeof(u32)))
8230 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8231 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8232 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8233 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8234 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
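 /* Editor's note: a burst must not cross an NVRAM page - the dword
  * ending a page is tagged LAST and the dword starting the next page
  * is tagged FIRST, restarting the transaction. */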
8235
8236 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8237
8238 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8239
8240 /* advance to the next dword */
8241 offset += sizeof(u32);
8242 data_buf += sizeof(u32);
8243 written_so_far += sizeof(u32);
8244 cmd_flags = 0;
8245 }
8246
8247 /* disable access to nvram interface */
8248 bnx2x_disable_nvram_access(bp);
8249 bnx2x_release_nvram_lock(bp);
8250
8251 return rc;
8252}
8253
8254static int bnx2x_set_eeprom(struct net_device *dev,
8255 struct ethtool_eeprom *eeprom, u8 *eebuf)
8256{
8257 struct bnx2x *bp = netdev_priv(dev);
8258 int rc;
8259
9f4c9583
EG
8260 if (!netif_running(dev))
8261 return -EAGAIN;
8262
34f80b04 8263 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8264 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8265 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8266 eeprom->len, eeprom->len);
8267
8268 /* parameters already validated in ethtool_set_eeprom */
8269
c18487ee 8270 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
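 /* (0x00504859 is simply the ASCII bytes 'P', 'H', 'Y') */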
34f80b04
EG
8271 if (eeprom->magic == 0x00504859)
8272 if (bp->port.pmf) {
8273
4a37fb66 8274 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8275 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8276 bp->link_params.ext_phy_config,
8277 (bp->state != BNX2X_STATE_CLOSED),
8278 eebuf, eeprom->len);
bb2a0f7a
YG
8279 if ((bp->state == BNX2X_STATE_OPEN) ||
8280 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8281 rc |= bnx2x_link_reset(&bp->link_params,
8282 &bp->link_vars);
8283 rc |= bnx2x_phy_init(&bp->link_params,
8284 &bp->link_vars);
bb2a0f7a 8285 }
4a37fb66 8286 bnx2x_release_phy_lock(bp);
34f80b04
EG
8287
8288 } else /* Only the PMF can access the PHY */
8289 return -EINVAL;
8290 else
c18487ee 8291 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8292
8293 return rc;
8294}
8295
8296static int bnx2x_get_coalesce(struct net_device *dev,
8297 struct ethtool_coalesce *coal)
8298{
8299 struct bnx2x *bp = netdev_priv(dev);
8300
8301 memset(coal, 0, sizeof(struct ethtool_coalesce));
8302
8303 coal->rx_coalesce_usecs = bp->rx_ticks;
8304 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8305
8306 return 0;
8307}
8308
8309static int bnx2x_set_coalesce(struct net_device *dev,
8310 struct ethtool_coalesce *coal)
8311{
8312 struct bnx2x *bp = netdev_priv(dev);
8313
8314 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8315 if (bp->rx_ticks > 3000)
8316 bp->rx_ticks = 3000;
8317
8318 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8319 if (bp->tx_ticks > 0x3000)
8320 bp->tx_ticks = 0x3000;
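 /* Editor's note: the bounds above are asymmetric - rx is clamped to
  * decimal 3000 while tx is clamped to hex 0x3000 (12288); presumably
  * one of the two was meant to match the other. */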
8321
34f80b04 8322 if (netif_running(dev))
a2fbb9ea
ET
8323 bnx2x_update_coalesce(bp);
8324
8325 return 0;
8326}
8327
8328static void bnx2x_get_ringparam(struct net_device *dev,
8329 struct ethtool_ringparam *ering)
8330{
8331 struct bnx2x *bp = netdev_priv(dev);
8332
8333 ering->rx_max_pending = MAX_RX_AVAIL;
8334 ering->rx_mini_max_pending = 0;
8335 ering->rx_jumbo_max_pending = 0;
8336
8337 ering->rx_pending = bp->rx_ring_size;
8338 ering->rx_mini_pending = 0;
8339 ering->rx_jumbo_pending = 0;
8340
8341 ering->tx_max_pending = MAX_TX_AVAIL;
8342 ering->tx_pending = bp->tx_ring_size;
8343}
8344
8345static int bnx2x_set_ringparam(struct net_device *dev,
8346 struct ethtool_ringparam *ering)
8347{
8348 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8349 int rc = 0;
a2fbb9ea
ET
8350
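 /* Editor's note: the tx ring must hold at least one worst-case packet,
  * up to MAX_SKB_FRAGS fragment BDs plus (presumably) the header/parse
  * descriptors behind the "+ 4" below. */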
8351 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8352 (ering->tx_pending > MAX_TX_AVAIL) ||
8353 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8354 return -EINVAL;
8355
8356 bp->rx_ring_size = ering->rx_pending;
8357 bp->tx_ring_size = ering->tx_pending;
8358
34f80b04
EG
8359 if (netif_running(dev)) {
8360 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8361 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8362 }
8363
34f80b04 8364 return rc;
a2fbb9ea
ET
8365}
8366
8367static void bnx2x_get_pauseparam(struct net_device *dev,
8368 struct ethtool_pauseparam *epause)
8369{
8370 struct bnx2x *bp = netdev_priv(dev);
8371
c0700f90 8372 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8373 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8374
c0700f90
DM
8375 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8376 BNX2X_FLOW_CTRL_RX);
8377 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8378 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8379
8380 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8381 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8382 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8383}
8384
8385static int bnx2x_set_pauseparam(struct net_device *dev,
8386 struct ethtool_pauseparam *epause)
8387{
8388 struct bnx2x *bp = netdev_priv(dev);
8389
34f80b04
EG
8390 if (IS_E1HMF(bp))
8391 return 0;
8392
a2fbb9ea
ET
8393 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8394 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8395 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8396
c0700f90 8397 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8398
f1410647 8399 if (epause->rx_pause)
c0700f90 8400 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8401
f1410647 8402 if (epause->tx_pause)
c0700f90 8403 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8404
c0700f90
DM
8405 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8406 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
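 /* Editor's note: after OR-ing in the requested directions a value still
  * equal to AUTO means neither was set, so it collapses to NONE; a
  * genuine AUTO is restored below only when the line speed itself is
  * autonegotiated. */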
a2fbb9ea 8407
c18487ee 8408 if (epause->autoneg) {
34f80b04 8409 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8410 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8411 return -EINVAL;
8412 }
a2fbb9ea 8413
c18487ee 8414 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8415 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8416 }
a2fbb9ea 8417
c18487ee
YR
8418 DP(NETIF_MSG_LINK,
8419 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8420
8421 if (netif_running(dev)) {
bb2a0f7a 8422 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8423 bnx2x_link_set(bp);
8424 }
a2fbb9ea
ET
8425
8426 return 0;
8427}
8428
df0f2343
VZ
8429static int bnx2x_set_flags(struct net_device *dev, u32 data)
8430{
8431 struct bnx2x *bp = netdev_priv(dev);
8432 int changed = 0;
8433 int rc = 0;
8434
8435 /* TPA requires Rx CSUM offloading */
8436 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8437 if (!(dev->features & NETIF_F_LRO)) {
8438 dev->features |= NETIF_F_LRO;
8439 bp->flags |= TPA_ENABLE_FLAG;
8440 changed = 1;
8441 }
8442
8443 } else if (dev->features & NETIF_F_LRO) {
8444 dev->features &= ~NETIF_F_LRO;
8445 bp->flags &= ~TPA_ENABLE_FLAG;
8446 changed = 1;
8447 }
8448
8449 if (changed && netif_running(dev)) {
8450 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8451 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8452 }
8453
8454 return rc;
8455}
8456
a2fbb9ea
ET
8457static u32 bnx2x_get_rx_csum(struct net_device *dev)
8458{
8459 struct bnx2x *bp = netdev_priv(dev);
8460
8461 return bp->rx_csum;
8462}
8463
8464static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8465{
8466 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8467 int rc = 0;
a2fbb9ea
ET
8468
8469 bp->rx_csum = data;
df0f2343
VZ
8470
8471 /* Disable TPA when Rx CSUM is disabled; otherwise all
8472 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8473 if (!data) {
8474 u32 flags = ethtool_op_get_flags(dev);
8475
8476 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8477 }
8478
8479 return rc;
a2fbb9ea
ET
8480}
8481
8482static int bnx2x_set_tso(struct net_device *dev, u32 data)
8483{
755735eb 8484 if (data) {
a2fbb9ea 8485 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8486 dev->features |= NETIF_F_TSO6;
8487 } else {
a2fbb9ea 8488 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8489 dev->features &= ~NETIF_F_TSO6;
8490 }
8491
a2fbb9ea
ET
8492 return 0;
8493}
8494
f3c87cdd 8495static const struct {
a2fbb9ea
ET
8496 char string[ETH_GSTRING_LEN];
8497} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8498 { "register_test (offline)" },
8499 { "memory_test (offline)" },
8500 { "loopback_test (offline)" },
8501 { "nvram_test (online)" },
8502 { "interrupt_test (online)" },
8503 { "link_test (online)" },
8504 { "idle check (online)" },
8505 { "MC errors (online)" }
a2fbb9ea
ET
8506};
8507
8508static int bnx2x_self_test_count(struct net_device *dev)
8509{
8510 return BNX2X_NUM_TESTS;
8511}
8512
f3c87cdd
YG
8513static int bnx2x_test_registers(struct bnx2x *bp)
8514{
8515 int idx, i, rc = -ENODEV;
8516 u32 wr_val = 0;
9dabc424 8517 int port = BP_PORT(bp);
f3c87cdd
YG
8518 static const struct {
8519 u32 offset0;
8520 u32 offset1;
8521 u32 mask;
8522 } reg_tbl[] = {
8523/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8524 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8525 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8526 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8527 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8528 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8529 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8530 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8531 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8532 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8533/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8534 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8535 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8536 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8537 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8538 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8539 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8540 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8541 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8542 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8543/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8544 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8545 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8546 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8547 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8548 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8549 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8550 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8551 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8552 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8553/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8554 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8555 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8556 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8557 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8558 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8559 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8560 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8561
8562 { 0xffffffff, 0, 0x00000000 }
8563 };
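	/* a short note on the table above: offset0 is the port-0 register,
	 * offset1 the per-port stride (the loop below accesses
	 * offset0 + port*offset1) and mask covers the bits that are
	 * expected to read back as written */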
8564
8565 if (!netif_running(bp->dev))
8566 return rc;
8567
 8568	/* Run the test twice:
 8569	   first by writing 0x00000000, then by writing 0xffffffff */
8570 for (idx = 0; idx < 2; idx++) {
8571
8572 switch (idx) {
8573 case 0:
8574 wr_val = 0;
8575 break;
8576 case 1:
8577 wr_val = 0xffffffff;
8578 break;
8579 }
8580
8581 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8582 u32 offset, mask, save_val, val;
8583
8584 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8585 mask = reg_tbl[i].mask;
8586
8587 save_val = REG_RD(bp, offset);
8588
8589 REG_WR(bp, offset, wr_val);
8590 val = REG_RD(bp, offset);
8591
8592 /* Restore the original register's value */
8593 REG_WR(bp, offset, save_val);
8594
 8595			/* verify the value is as expected */
8596 if ((val & mask) != (wr_val & mask))
8597 goto test_reg_exit;
8598 }
8599 }
8600
8601 rc = 0;
8602
8603test_reg_exit:
8604 return rc;
8605}
8606
8607static int bnx2x_test_memory(struct bnx2x *bp)
8608{
8609 int i, j, rc = -ENODEV;
8610 u32 val;
8611 static const struct {
8612 u32 offset;
8613 int size;
8614 } mem_tbl[] = {
8615 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8616 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8617 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8618 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8619 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8620 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8621 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8622
8623 { 0xffffffff, 0 }
8624 };
8625 static const struct {
8626 char *name;
8627 u32 offset;
8628 u32 e1_mask;
8629 u32 e1h_mask;
f3c87cdd 8630 } prty_tbl[] = {
8631 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8632 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8633 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8634 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8635 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8636 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8637
8638 { NULL, 0xffffffff, 0, 0 }
8639 };
8640
8641 if (!netif_running(bp->dev))
8642 return rc;
8643
8644 /* Go through all the memories */
8645 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8646 for (j = 0; j < mem_tbl[i].size; j++)
8647 REG_RD(bp, mem_tbl[i].offset + j*4);
8648
8649 /* Check the parity status */
8650 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8651 val = REG_RD(bp, prty_tbl[i].offset);
8652 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8653 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8654 DP(NETIF_MSG_HW,
8655 "%s is 0x%x\n", prty_tbl[i].name, val);
8656 goto test_mem_exit;
8657 }
8658 }
8659
8660 rc = 0;
8661
8662test_mem_exit:
8663 return rc;
8664}
8665
8666static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8667{
8668 int cnt = 1000;
8669
8670 if (link_up)
8671 while (bnx2x_link_test(bp) && cnt--)
8672 msleep(10);
8673}
8674
8675static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8676{
8677 unsigned int pkt_size, num_pkts, i;
8678 struct sk_buff *skb;
8679 unsigned char *packet;
8680 struct bnx2x_fastpath *fp = &bp->fp[0];
8681 u16 tx_start_idx, tx_idx;
8682 u16 rx_start_idx, rx_idx;
8683 u16 pkt_prod;
8684 struct sw_tx_bd *tx_buf;
8685 struct eth_tx_bd *tx_bd;
8686 dma_addr_t mapping;
8687 union eth_rx_cqe *cqe;
8688 u8 cqe_fp_flags;
8689 struct sw_rx_bd *rx_buf;
8690 u16 len;
8691 int rc = -ENODEV;
8692
8693 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8694 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8695 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8696 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8697 bnx2x_release_phy_lock(bp);
8698
8699 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8700 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8701 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8702 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8703 bnx2x_release_phy_lock(bp);
8704 /* wait until link state is restored */
8705 bnx2x_wait_for_link(bp, link_up);
8706
8707 } else
8708 return -EINVAL;
8709
8710 pkt_size = 1514;
8711 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8712 if (!skb) {
8713 rc = -ENOMEM;
8714 goto test_loopback_exit;
8715 }
8716 packet = skb_put(skb, pkt_size);
8717 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8718 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8719 for (i = ETH_HLEN; i < pkt_size; i++)
8720 packet[i] = (unsigned char) (i & 0xff);
8721
8722 num_pkts = 0;
8723 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8724 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8725
8726 pkt_prod = fp->tx_pkt_prod++;
8727 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8728 tx_buf->first_bd = fp->tx_bd_prod;
8729 tx_buf->skb = skb;
8730
8731 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8732 mapping = pci_map_single(bp->pdev, skb->data,
8733 skb_headlen(skb), PCI_DMA_TODEVICE);
8734 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8735 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8736 tx_bd->nbd = cpu_to_le16(1);
8737 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8738 tx_bd->vlan = cpu_to_le16(pkt_prod);
8739 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8740 ETH_TX_BD_FLAGS_END_BD);
8741 tx_bd->general_data = ((UNICAST_ADDRESS <<
8742 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8743
8744 wmb();
8745
8746 fp->hw_tx_prods->bds_prod =
8747 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8748 mb(); /* FW restriction: must not reorder writing nbd and packets */
8749 fp->hw_tx_prods->packets_prod =
8750 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8751 DOORBELL(bp, FP_IDX(fp), 0);
8752
8753 mmiowb();
8754
8755 num_pkts++;
8756 fp->tx_bd_prod++;
8757 bp->dev->trans_start = jiffies;
8758
8759 udelay(100);
8760
8761 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8762 if (tx_idx != tx_start_idx + num_pkts)
8763 goto test_loopback_exit;
8764
8765 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8766 if (rx_idx != rx_start_idx + num_pkts)
8767 goto test_loopback_exit;
8768
8769 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8770 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8771 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8772 goto test_loopback_rx_exit;
8773
8774 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8775 if (len != pkt_size)
8776 goto test_loopback_rx_exit;
8777
8778 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8779 skb = rx_buf->skb;
8780 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8781 for (i = ETH_HLEN; i < pkt_size; i++)
8782 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8783 goto test_loopback_rx_exit;
8784
8785 rc = 0;
8786
8787test_loopback_rx_exit:
8788
8789 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8790 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8791 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8792 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8793
8794 /* Update producers */
8795 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8796 fp->rx_sge_prod);
8797
8798test_loopback_exit:
8799 bp->link_params.loopback_mode = LOOPBACK_NONE;
8800
8801 return rc;
8802}
8803
8804static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8805{
8806 int rc = 0;
8807
8808 if (!netif_running(bp->dev))
8809 return BNX2X_LOOPBACK_FAILED;
8810
f8ef6e44 8811 bnx2x_netif_stop(bp, 1);
8812
8813 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8814 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8815 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8816 }
8817
8818 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8819 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8820 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8821 }
8822
8823 bnx2x_netif_start(bp);
8824
8825 return rc;
8826}
8827
8828#define CRC32_RESIDUAL 0xdebb20e3
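/* each block in nvram_tbl below is assumed to carry its own CRC32 in its
 * last dword, so running ether_crc_le() over the whole block (data plus
 * stored CRC) must leave the well-known CRC-32 residual defined above */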
8829
8830static int bnx2x_test_nvram(struct bnx2x *bp)
8831{
8832 static const struct {
8833 int offset;
8834 int size;
8835 } nvram_tbl[] = {
8836 { 0, 0x14 }, /* bootstrap */
8837 { 0x14, 0xec }, /* dir */
8838 { 0x100, 0x350 }, /* manuf_info */
8839 { 0x450, 0xf0 }, /* feature_info */
8840 { 0x640, 0x64 }, /* upgrade_key_info */
8841 { 0x6a4, 0x64 },
8842 { 0x708, 0x70 }, /* manuf_key_info */
8843 { 0x778, 0x70 },
8844 { 0, 0 }
8845 };
8846 u32 buf[0x350 / 4];
8847 u8 *data = (u8 *)buf;
8848 int i, rc;
8849 u32 magic, csum;
8850
8851 rc = bnx2x_nvram_read(bp, 0, data, 4);
8852 if (rc) {
8853 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8854 goto test_nvram_exit;
8855 }
8856
8857 magic = be32_to_cpu(buf[0]);
8858 if (magic != 0x669955aa) {
8859 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8860 rc = -ENODEV;
8861 goto test_nvram_exit;
8862 }
8863
8864 for (i = 0; nvram_tbl[i].size; i++) {
8865
8866 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8867 nvram_tbl[i].size);
8868 if (rc) {
8869 DP(NETIF_MSG_PROBE,
8870 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8871 goto test_nvram_exit;
8872 }
8873
8874 csum = ether_crc_le(nvram_tbl[i].size, data);
8875 if (csum != CRC32_RESIDUAL) {
8876 DP(NETIF_MSG_PROBE,
8877 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8878 rc = -ENODEV;
8879 goto test_nvram_exit;
8880 }
8881 }
8882
8883test_nvram_exit:
8884 return rc;
8885}
8886
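/* the interrupt test posts a zero-length SET_MAC ramrod and waits for its
 * completion, proving that the slowpath interrupt path is functional */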
8887static int bnx2x_test_intr(struct bnx2x *bp)
8888{
8889 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8890 int i, rc;
8891
8892 if (!netif_running(bp->dev))
8893 return -ENODEV;
8894
8895 config->hdr.length_6b = 0;
8896 config->hdr.offset = 0;
8897 config->hdr.client_id = BP_CL_ID(bp);
8898 config->hdr.reserved1 = 0;
8899
8900 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8901 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8902 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8903 if (rc == 0) {
8904 bp->set_mac_pending++;
8905 for (i = 0; i < 10; i++) {
8906 if (!bp->set_mac_pending)
8907 break;
8908 msleep_interruptible(10);
8909 }
8910 if (i == 10)
8911 rc = -ENODEV;
8912 }
8913
8914 return rc;
8915}
8916
8917static void bnx2x_self_test(struct net_device *dev,
8918 struct ethtool_test *etest, u64 *buf)
8919{
8920 struct bnx2x *bp = netdev_priv(dev);
8921
8922 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8923
f3c87cdd 8924 if (!netif_running(dev))
a2fbb9ea 8925 return;
a2fbb9ea 8926
33471629 8927 /* offline tests are not supported in MF mode */
8928 if (IS_E1HMF(bp))
8929 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8930
8931 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8932 u8 link_up;
8933
8934 link_up = bp->link_vars.link_up;
8935 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8936 bnx2x_nic_load(bp, LOAD_DIAG);
8937 /* wait until link state is restored */
8938 bnx2x_wait_for_link(bp, link_up);
8939
8940 if (bnx2x_test_registers(bp) != 0) {
8941 buf[0] = 1;
8942 etest->flags |= ETH_TEST_FL_FAILED;
8943 }
8944 if (bnx2x_test_memory(bp) != 0) {
8945 buf[1] = 1;
8946 etest->flags |= ETH_TEST_FL_FAILED;
8947 }
8948 buf[2] = bnx2x_test_loopback(bp, link_up);
8949 if (buf[2] != 0)
8950 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8951
8952 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8953 bnx2x_nic_load(bp, LOAD_NORMAL);
8954 /* wait until link state is restored */
8955 bnx2x_wait_for_link(bp, link_up);
8956 }
8957 if (bnx2x_test_nvram(bp) != 0) {
8958 buf[3] = 1;
8959 etest->flags |= ETH_TEST_FL_FAILED;
8960 }
8961 if (bnx2x_test_intr(bp) != 0) {
8962 buf[4] = 1;
8963 etest->flags |= ETH_TEST_FL_FAILED;
8964 }
8965 if (bp->port.pmf)
8966 if (bnx2x_link_test(bp) != 0) {
8967 buf[5] = 1;
8968 etest->flags |= ETH_TEST_FL_FAILED;
8969 }
8970 buf[7] = bnx2x_mc_assert(bp);
8971 if (buf[7] != 0)
8972 etest->flags |= ETH_TEST_FL_FAILED;
8973
8974#ifdef BNX2X_EXTRA_DEBUG
8975 bnx2x_panic_dump(bp);
8976#endif
8977}
8978
8979static const struct {
8980 long offset;
8981 int size;
8982 u32 flags;
8983#define STATS_FLAGS_PORT 1
8984#define STATS_FLAGS_FUNC 2
8985 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8986} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8987/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8988 8, STATS_FLAGS_FUNC, "rx_bytes" },
8989 { STATS_OFFSET32(error_bytes_received_hi),
8990 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8991 { STATS_OFFSET32(total_bytes_transmitted_hi),
8992 8, STATS_FLAGS_FUNC, "tx_bytes" },
8993 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8994 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8995 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8996 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8997 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8998 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8999 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9000 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9001 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9002 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9003 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9004 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9005/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9006 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9007 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9008 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9009 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9010 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9011 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9012 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9013 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9014 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9015 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9016 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9017 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9018 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9019 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9020 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9021 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9022 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9023 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9024 8, STATS_FLAGS_PORT, "rx_fragments" },
9025/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9026 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9027 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9028 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9029 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9030 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9031 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9032 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9033 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9034 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9035 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9036 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9037 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9038 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9039 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9040 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9041 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9042 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9043 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9044 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9045/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9046 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9047 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9048 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9049 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9050 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9051 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9052 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9053 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9054 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9055 { STATS_OFFSET32(mac_filter_discard),
9056 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9057 { STATS_OFFSET32(no_buff_discard),
9058 4, STATS_FLAGS_FUNC, "rx_discards" },
9059 { STATS_OFFSET32(xxoverflow_discard),
9060 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9061 { STATS_OFFSET32(brb_drop_hi),
9062 8, STATS_FLAGS_PORT, "brb_discard" },
9063 { STATS_OFFSET32(brb_truncate_hi),
9064 8, STATS_FLAGS_PORT, "brb_truncate" },
9065/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9066 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9067 { STATS_OFFSET32(rx_skb_alloc_failed),
9068 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9069/* 42 */{ STATS_OFFSET32(hw_csum_err),
9070 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9071};
9072
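/* port statistics are kept per physical port, so they are hidden when the
 * function only owns a slice of the port (E1H multi-function mode) */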
9073#define IS_NOT_E1HMF_STAT(bp, i) \
9074 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9075
9076static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9077{
9078 struct bnx2x *bp = netdev_priv(dev);
9079 int i, j;
9080
9081 switch (stringset) {
9082 case ETH_SS_STATS:
bb2a0f7a 9083 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9084 if (IS_NOT_E1HMF_STAT(bp, i))
9085 continue;
9086 strcpy(buf + j*ETH_GSTRING_LEN,
9087 bnx2x_stats_arr[i].string);
9088 j++;
9089 }
9090 break;
9091
9092 case ETH_SS_TEST:
9093 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9094 break;
9095 }
9096}
9097
9098static int bnx2x_get_stats_count(struct net_device *dev)
9099{
9100 struct bnx2x *bp = netdev_priv(dev);
9101 int i, num_stats = 0;
9102
9103 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9104 if (IS_NOT_E1HMF_STAT(bp, i))
9105 continue;
9106 num_stats++;
9107 }
9108 return num_stats;
9109}
9110
9111static void bnx2x_get_ethtool_stats(struct net_device *dev,
9112 struct ethtool_stats *stats, u64 *buf)
9113{
9114 struct bnx2x *bp = netdev_priv(dev);
9115 u32 *hw_stats = (u32 *)&bp->eth_stats;
9116 int i, j;
a2fbb9ea 9117
bb2a0f7a 9118 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9119 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9120 continue;
9121
9122 if (bnx2x_stats_arr[i].size == 0) {
9123 /* skip this counter */
9124 buf[j] = 0;
9125 j++;
9126 continue;
9127 }
bb2a0f7a 9128 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9129 /* 4-byte counter */
9130 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9131 j++;
9132 continue;
9133 }
9134 /* 8-byte counter */
9135 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9136 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9137 j++;
9138 }
9139}
9140
9141static int bnx2x_phys_id(struct net_device *dev, u32 data)
9142{
9143 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9144 int port = BP_PORT(bp);
9145 int i;
9146
9147 if (!netif_running(dev))
9148 return 0;
9149
9150 if (!bp->port.pmf)
9151 return 0;
9152
9153 if (data == 0)
9154 data = 2;
9155
9156 for (i = 0; i < (data * 2); i++) {
c18487ee 9157 if ((i % 2) == 0)
34f80b04 9158 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9159 bp->link_params.hw_led_mode,
9160 bp->link_params.chip_id);
9161 else
34f80b04 9162 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9163 bp->link_params.hw_led_mode,
9164 bp->link_params.chip_id);
9165
9166 msleep_interruptible(500);
9167 if (signal_pending(current))
9168 break;
9169 }
9170
c18487ee 9171 if (bp->link_vars.link_up)
34f80b04 9172 bnx2x_set_led(bp, port, LED_MODE_OPER,
9173 bp->link_vars.line_speed,
9174 bp->link_params.hw_led_mode,
9175 bp->link_params.chip_id);
9176
9177 return 0;
9178}
9179
9180static struct ethtool_ops bnx2x_ethtool_ops = {
9181 .get_settings = bnx2x_get_settings,
9182 .set_settings = bnx2x_set_settings,
9183 .get_drvinfo = bnx2x_get_drvinfo,
9184 .get_wol = bnx2x_get_wol,
9185 .set_wol = bnx2x_set_wol,
9186 .get_msglevel = bnx2x_get_msglevel,
9187 .set_msglevel = bnx2x_set_msglevel,
9188 .nway_reset = bnx2x_nway_reset,
9189 .get_link = ethtool_op_get_link,
9190 .get_eeprom_len = bnx2x_get_eeprom_len,
9191 .get_eeprom = bnx2x_get_eeprom,
9192 .set_eeprom = bnx2x_set_eeprom,
9193 .get_coalesce = bnx2x_get_coalesce,
9194 .set_coalesce = bnx2x_set_coalesce,
9195 .get_ringparam = bnx2x_get_ringparam,
9196 .set_ringparam = bnx2x_set_ringparam,
9197 .get_pauseparam = bnx2x_get_pauseparam,
9198 .set_pauseparam = bnx2x_set_pauseparam,
9199 .get_rx_csum = bnx2x_get_rx_csum,
9200 .set_rx_csum = bnx2x_set_rx_csum,
9201 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9202 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9203 .set_flags = bnx2x_set_flags,
9204 .get_flags = ethtool_op_get_flags,
9205 .get_sg = ethtool_op_get_sg,
9206 .set_sg = ethtool_op_set_sg,
9207 .get_tso = ethtool_op_get_tso,
9208 .set_tso = bnx2x_set_tso,
9209 .self_test_count = bnx2x_self_test_count,
9210 .self_test = bnx2x_self_test,
9211 .get_strings = bnx2x_get_strings,
9212 .phys_id = bnx2x_phys_id,
9213 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9214 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9215};
9216
9217/* end of ethtool_ops */
9218
9219/****************************************************************************
9220* General service functions
9221****************************************************************************/
9222
9223static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9224{
9225 u16 pmcsr;
9226
9227 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9228
9229 switch (state) {
9230 case PCI_D0:
34f80b04 9231 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9232 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9233 PCI_PM_CTRL_PME_STATUS));
9234
9235 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9236 /* delay required during transition out of D3hot */
a2fbb9ea 9237 msleep(20);
34f80b04 9238 break;
a2fbb9ea 9239
9240 case PCI_D3hot:
9241 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9242 pmcsr |= 3;
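		/* PowerState field (bits 1:0) of PMCSR: 3 selects D3hot */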
a2fbb9ea 9243
9244 if (bp->wol)
9245 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9246
9247 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9248 pmcsr);
a2fbb9ea 9249
9250 /* No more memory access after this point until
9251 * device is brought back to D0.
9252 */
9253 break;
9254
9255 default:
9256 return -EINVAL;
9257 }
9258 return 0;
9259}
9260
9261/*
9262 * net_device service functions
9263 */
9264
9265static int bnx2x_poll(struct napi_struct *napi, int budget)
9266{
9267 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9268 napi);
9269 struct bnx2x *bp = fp->bp;
9270 int work_done = 0;
2772f903 9271 u16 rx_cons_sb;
9272
9273#ifdef BNX2X_STOP_ON_ERROR
9274 if (unlikely(bp->panic))
34f80b04 9275 goto poll_panic;
9276#endif
9277
9278 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9279 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9280 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9281
9282 bnx2x_update_fpsb_idx(fp);
9283
da5a662a 9284 if (BNX2X_HAS_TX_WORK(fp))
9285 bnx2x_tx_int(fp, budget);
9286
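	/* the last entry of each RCQ page is assumed to be a "next page"
	 * descriptor; step the status block index past it before comparing
	 * it with the driver's consumer */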
9287 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9288 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9289 rx_cons_sb++;
da5a662a 9290 if (BNX2X_HAS_RX_WORK(fp))
9291 work_done = bnx2x_rx_int(fp, budget);
9292
da5a662a 9293 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9294 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9295 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9296 rx_cons_sb++;
9297
9298 /* must not complete if we consumed full budget */
da5a662a 9299 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9300
9301#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9302poll_panic:
a2fbb9ea 9303#endif
908a7a16 9304 netif_rx_complete(napi);
a2fbb9ea 9305
34f80b04 9306 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9307 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9308 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9309 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9310 }
9311 return work_done;
9312}
9313
9314
9315/* we split the first BD into headers and data BDs
33471629 9316 * to ease the pain of our fellow microcode engineers
9317 * we use one mapping for both BDs
9318 * So far this has only been observed to happen
9319 * in Other Operating Systems(TM)
9320 */
9321static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9322 struct bnx2x_fastpath *fp,
9323 struct eth_tx_bd **tx_bd, u16 hlen,
9324 u16 bd_prod, int nbd)
9325{
9326 struct eth_tx_bd *h_tx_bd = *tx_bd;
9327 struct eth_tx_bd *d_tx_bd;
9328 dma_addr_t mapping;
9329 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9330
9331 /* first fix first BD */
9332 h_tx_bd->nbd = cpu_to_le16(nbd);
9333 h_tx_bd->nbytes = cpu_to_le16(hlen);
9334
9335 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9336 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9337 h_tx_bd->addr_lo, h_tx_bd->nbd);
9338
9339 /* now get a new data BD
9340 * (after the pbd) and fill it */
9341 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9342 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9343
9344 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9345 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9346
9347 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9348 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9349 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9350 d_tx_bd->vlan = 0;
9351 /* this marks the BD as one that has no individual mapping
9352 * the FW ignores this flag in a BD not marked start
9353 */
9354 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9355 DP(NETIF_MSG_TX_QUEUED,
9356 "TSO split data size is %d (%x:%x)\n",
9357 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9358
9359 /* update tx_bd for marking the last BD flag */
9360 *tx_bd = d_tx_bd;
9361
9362 return bd_prod;
9363}
9364
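/* the stack's partial checksum may start "fix" bytes before (fix > 0) or
 * after (fix < 0) the transport header; fold the bytes in that gap out of
 * or into the sum so it covers the transport header onward, then byte-swap
 * it into the order the FW expects */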
9365static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9366{
9367 if (fix > 0)
9368 csum = (u16) ~csum_fold(csum_sub(csum,
9369 csum_partial(t_header - fix, fix, 0)));
9370
9371 else if (fix < 0)
9372 csum = (u16) ~csum_fold(csum_add(csum,
9373 csum_partial(t_header, -fix, 0)));
9374
9375 return swab16(csum);
9376}
9377
9378static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9379{
9380 u32 rc;
9381
9382 if (skb->ip_summed != CHECKSUM_PARTIAL)
9383 rc = XMIT_PLAIN;
9384
9385 else {
9386 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9387 rc = XMIT_CSUM_V6;
9388 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9389 rc |= XMIT_CSUM_TCP;
9390
9391 } else {
9392 rc = XMIT_CSUM_V4;
9393 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9394 rc |= XMIT_CSUM_TCP;
9395 }
9396 }
9397
9398 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9399 rc |= XMIT_GSO_V4;
9400
9401 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9402 rc |= XMIT_GSO_V6;
9403
9404 return rc;
9405}
9406
9407/* check if packet requires linearization (packet is too fragmented) */
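/* rough idea: the FW fetches at most MAX_FETCH_BD BDs per LSO frame, so a
 * window of (MAX_FETCH_BD - 3) frags is slid along the frag list and the
 * skb is linearized if any window covers less than one MSS */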
9408static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9409 u32 xmit_type)
9410{
9411 int to_copy = 0;
9412 int hlen = 0;
9413 int first_bd_sz = 0;
9414
9415 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9416 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9417
9418 if (xmit_type & XMIT_GSO) {
9419 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9420 /* Check if LSO packet needs to be copied:
9421 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9422 int wnd_size = MAX_FETCH_BD - 3;
33471629 9423 /* Number of windows to check */
9424 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9425 int wnd_idx = 0;
9426 int frag_idx = 0;
9427 u32 wnd_sum = 0;
9428
9429 /* Headers length */
9430 hlen = (int)(skb_transport_header(skb) - skb->data) +
9431 tcp_hdrlen(skb);
9432
 9434			/* Amount of data (w/o headers) on linear part of SKB */
9434 first_bd_sz = skb_headlen(skb) - hlen;
9435
9436 wnd_sum = first_bd_sz;
9437
9438 /* Calculate the first sum - it's special */
9439 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9440 wnd_sum +=
9441 skb_shinfo(skb)->frags[frag_idx].size;
9442
9443 /* If there was data on linear skb data - check it */
9444 if (first_bd_sz > 0) {
9445 if (unlikely(wnd_sum < lso_mss)) {
9446 to_copy = 1;
9447 goto exit_lbl;
9448 }
9449
9450 wnd_sum -= first_bd_sz;
9451 }
9452
9453 /* Others are easier: run through the frag list and
9454 check all windows */
9455 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9456 wnd_sum +=
9457 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9458
9459 if (unlikely(wnd_sum < lso_mss)) {
9460 to_copy = 1;
9461 break;
9462 }
9463 wnd_sum -=
9464 skb_shinfo(skb)->frags[wnd_idx].size;
9465 }
9466
9467 } else {
 9468			/* a non-LSO packet this fragmented must always
 9469			   be linearized */
9470 to_copy = 1;
9471 }
9472 }
9473
9474exit_lbl:
9475 if (unlikely(to_copy))
9476 DP(NETIF_MSG_TX_QUEUED,
9477 "Linearization IS REQUIRED for %s packet. "
9478 "num_frags %d hlen %d first_bd_sz %d\n",
9479 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9480 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9481
9482 return to_copy;
9483}
9484
9485/* called with netif_tx_lock
a2fbb9ea 9486 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9487 * netif_wake_queue()
9488 */
9489static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9490{
9491 struct bnx2x *bp = netdev_priv(dev);
9492 struct bnx2x_fastpath *fp;
9493 struct sw_tx_bd *tx_buf;
9494 struct eth_tx_bd *tx_bd;
9495 struct eth_tx_parse_bd *pbd = NULL;
9496 u16 pkt_prod, bd_prod;
755735eb 9497 int nbd, fp_index;
a2fbb9ea 9498 dma_addr_t mapping;
9499 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9500 int vlan_off = (bp->e1hov ? 4 : 0);
9501 int i;
9502 u8 hlen = 0;
9503
9504#ifdef BNX2X_STOP_ON_ERROR
9505 if (unlikely(bp->panic))
9506 return NETDEV_TX_BUSY;
9507#endif
9508
755735eb 9509 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9510 fp = &bp->fp[fp_index];
755735eb 9511
231fd58a 9512 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9513		bp->eth_stats.driver_xoff++;
9514 netif_stop_queue(dev);
9515 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9516 return NETDEV_TX_BUSY;
9517 }
9518
9519 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9520 " gso type %x xmit_type %x\n",
9521 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9522 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9523
33471629 9524 /* First, check if we need to linearize the skb
9525 (due to FW restrictions) */
9526 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9527 /* Statistics of linearization */
9528 bp->lin_cnt++;
9529 if (skb_linearize(skb) != 0) {
9530 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9531 "silently dropping this SKB\n");
9532 dev_kfree_skb_any(skb);
da5a662a 9533 return NETDEV_TX_OK;
9534 }
9535 }
9536
a2fbb9ea 9537 /*
755735eb 9538 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9539 then for TSO or xsum we have a parsing info BD,
755735eb 9540 and only then we have the rest of the TSO BDs.
9541 (don't forget to mark the last one as last,
9542 and to unmap only AFTER you write to the BD ...)
755735eb 9543	   And above all, all pbd sizes are in words - NOT DWORDS!
9544 */
9545
9546 pkt_prod = fp->tx_pkt_prod++;
755735eb 9547 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9548
755735eb 9549 /* get a tx_buf and first BD */
9550 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9551 tx_bd = &fp->tx_desc_ring[bd_prod];
9552
9553 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9554 tx_bd->general_data = (UNICAST_ADDRESS <<
9555 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9556 /* header nbd */
9557 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9558
9559 /* remember the first BD of the packet */
9560 tx_buf->first_bd = fp->tx_bd_prod;
9561 tx_buf->skb = skb;
a2fbb9ea
ET
9562
9563 DP(NETIF_MSG_TX_QUEUED,
9564 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9565 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9566
9567 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9568 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9569 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9570 vlan_off += 4;
9571 } else
9572 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9573
755735eb 9574 if (xmit_type) {
755735eb 9575 /* turn on parsing and get a BD */
9576 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9577 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9578
9579 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9580 }
9581
9582 if (xmit_type & XMIT_CSUM) {
9583 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9584
9585 /* for now NS flag is not used in Linux */
755735eb 9586 pbd->global_data = (hlen |
96fc1784 9587 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9588 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9589
9590 pbd->ip_hlen = (skb_transport_header(skb) -
9591 skb_network_header(skb)) / 2;
9592
9593 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9594
9595 pbd->total_hlen = cpu_to_le16(hlen);
9596 hlen = hlen*2 - vlan_off;
a2fbb9ea 9597
9598 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9599
9600 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9601 tx_bd->bd_flags.as_bitfield |=
9602 ETH_TX_BD_FLAGS_IP_CSUM;
9603 else
9604 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9605
9606 if (xmit_type & XMIT_CSUM_TCP) {
9607 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9608
9609 } else {
9610 s8 fix = SKB_CS_OFF(skb); /* signed! */
9611
a2fbb9ea 9612 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9613 pbd->cs_offset = fix / 2;
a2fbb9ea 9614
9615 DP(NETIF_MSG_TX_QUEUED,
9616 "hlen %d offset %d fix %d csum before fix %x\n",
9617 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9618 SKB_CS(skb));
9619
9620 /* HW bug: fixup the CSUM */
9621 pbd->tcp_pseudo_csum =
9622 bnx2x_csum_fix(skb_transport_header(skb),
9623 SKB_CS(skb), fix);
9624
9625 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9626 pbd->tcp_pseudo_csum);
9627 }
9628 }
9629
9630 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9631 skb_headlen(skb), PCI_DMA_TODEVICE);
9632
9633 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9634 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9635 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9636 tx_bd->nbd = cpu_to_le16(nbd);
9637 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9638
9639 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9640 " nbytes %d flags %x vlan %x\n",
9641 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9642 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9643 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9644
755735eb 9645 if (xmit_type & XMIT_GSO) {
9646
9647 DP(NETIF_MSG_TX_QUEUED,
9648 "TSO packet len %d hlen %d total len %d tso size %d\n",
9649 skb->len, hlen, skb_headlen(skb),
9650 skb_shinfo(skb)->gso_size);
9651
9652 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9653
9654 if (unlikely(skb_headlen(skb) > hlen))
9655 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9656 bd_prod, ++nbd);
9657
9658 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9659 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9660 pbd->tcp_flags = pbd_tcp_flags(skb);
9661
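		/* the FW rebuilds the TCP checksum for each segment, so seed
		 * it with a pseudo-header sum computed over a zero length
		 * (hence PSEUDO_CS_WITHOUT_LEN is also set below) */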
9662 if (xmit_type & XMIT_GSO_V4) {
9663 pbd->ip_id = swab16(ip_hdr(skb)->id);
9664 pbd->tcp_pseudo_csum =
9665 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9666 ip_hdr(skb)->daddr,
9667 0, IPPROTO_TCP, 0));
9668
9669 } else
9670 pbd->tcp_pseudo_csum =
9671 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9672 &ipv6_hdr(skb)->daddr,
9673 0, IPPROTO_TCP, 0));
9674
9675 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9676 }
9677
9678 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9679 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9680
9681 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9682 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9683
9684 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9685 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9686
9687 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9688 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9689 tx_bd->nbytes = cpu_to_le16(frag->size);
9690 tx_bd->vlan = cpu_to_le16(pkt_prod);
9691 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9692
9693 DP(NETIF_MSG_TX_QUEUED,
9694 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9695 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9696 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9697 }
9698
755735eb 9699 /* now at last mark the BD as the last BD */
9700 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9701
9702 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9703 tx_bd, tx_bd->bd_flags.as_bitfield);
9704
9705 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9706
755735eb 9707 /* now send a tx doorbell, counting the next BD
9708 * if the packet contains or ends with it
9709 */
9710 if (TX_BD_POFF(bd_prod) < nbd)
9711 nbd++;
9712
9713 if (pbd)
9714 DP(NETIF_MSG_TX_QUEUED,
9715 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9716 " tcp_flags %x xsum %x seq %u hlen %u\n",
9717 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9718 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9719 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9720
755735eb 9721 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9722
9723 /*
9724 * Make sure that the BD data is updated before updating the producer
9725 * since FW might read the BD right after the producer is updated.
9726 * This is only applicable for weak-ordered memory model archs such
9727 * as IA-64. The following barrier is also mandatory since FW will
 9728	 * assume packets must have BDs.
9729 */
9730 wmb();
9731
9732 fp->hw_tx_prods->bds_prod =
9733 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9734 mb(); /* FW restriction: must not reorder writing nbd and packets */
9735 fp->hw_tx_prods->packets_prod =
9736 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9737 DOORBELL(bp, FP_IDX(fp), 0);
9738
9739 mmiowb();
9740
755735eb 9741 fp->tx_bd_prod += nbd;
9742 dev->trans_start = jiffies;
9743
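	/* the "+ 3" mirrors bnx2x_pkt_req_lin(): a worst-case next packet
	 * needs nr_frags BDs plus a linear data BD, a PBD and a last BD */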
9744 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9745 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9746 if we put Tx into XOFF state. */
9747 smp_mb();
a2fbb9ea 9748 netif_stop_queue(dev);
bb2a0f7a 9749 bp->eth_stats.driver_xoff++;
9750 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9751 netif_wake_queue(dev);
9752 }
9753 fp->tx_pkt++;
9754
9755 return NETDEV_TX_OK;
9756}
9757
bb2a0f7a 9758/* called with rtnl_lock */
9759static int bnx2x_open(struct net_device *dev)
9760{
9761 struct bnx2x *bp = netdev_priv(dev);
9762
9763 bnx2x_set_power_state(bp, PCI_D0);
9764
bb2a0f7a 9765 return bnx2x_nic_load(bp, LOAD_OPEN);
9766}
9767
bb2a0f7a 9768/* called with rtnl_lock */
9769static int bnx2x_close(struct net_device *dev)
9770{
9771 struct bnx2x *bp = netdev_priv(dev);
9772
9773 /* Unload the driver, release IRQs */
9774 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9775 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9776 if (!CHIP_REV_IS_SLOW(bp))
9777 bnx2x_set_power_state(bp, PCI_D3hot);
9778
9779 return 0;
9780}
9781
9782/* called with netif_tx_lock from set_multicast */
9783static void bnx2x_set_rx_mode(struct net_device *dev)
9784{
9785 struct bnx2x *bp = netdev_priv(dev);
9786 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9787 int port = BP_PORT(bp);
9788
9789 if (bp->state != BNX2X_STATE_OPEN) {
9790 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9791 return;
9792 }
9793
9794 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9795
9796 if (dev->flags & IFF_PROMISC)
9797 rx_mode = BNX2X_RX_MODE_PROMISC;
9798
9799 else if ((dev->flags & IFF_ALLMULTI) ||
9800 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9801 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9802
9803 else { /* some multicasts */
9804 if (CHIP_IS_E1(bp)) {
9805 int i, old, offset;
9806 struct dev_mc_list *mclist;
9807 struct mac_configuration_cmd *config =
9808 bnx2x_sp(bp, mcast_config);
9809
9810 for (i = 0, mclist = dev->mc_list;
9811 mclist && (i < dev->mc_count);
9812 i++, mclist = mclist->next) {
9813
9814 config->config_table[i].
9815 cam_entry.msb_mac_addr =
9816 swab16(*(u16 *)&mclist->dmi_addr[0]);
9817 config->config_table[i].
9818 cam_entry.middle_mac_addr =
9819 swab16(*(u16 *)&mclist->dmi_addr[2]);
9820 config->config_table[i].
9821 cam_entry.lsb_mac_addr =
9822 swab16(*(u16 *)&mclist->dmi_addr[4]);
9823 config->config_table[i].cam_entry.flags =
9824 cpu_to_le16(port);
9825 config->config_table[i].
9826 target_table_entry.flags = 0;
9827 config->config_table[i].
9828 target_table_entry.client_id = 0;
9829 config->config_table[i].
9830 target_table_entry.vlan_id = 0;
9831
9832 DP(NETIF_MSG_IFUP,
9833 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9834 config->config_table[i].
9835 cam_entry.msb_mac_addr,
9836 config->config_table[i].
9837 cam_entry.middle_mac_addr,
9838 config->config_table[i].
9839 cam_entry.lsb_mac_addr);
9840 }
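			/* invalidate leftovers from a previously longer list,
			 * stopping at the first entry that is already invalid
			 * (the ones past it were never written) */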
9841 old = config->hdr.length_6b;
9842 if (old > i) {
9843 for (; i < old; i++) {
9844 if (CAM_IS_INVALID(config->
9845 config_table[i])) {
9846 i--; /* already invalidated */
9847 break;
9848 }
9849 /* invalidate */
9850 CAM_INVALIDATE(config->
9851 config_table[i]);
9852 }
9853 }
9854
9855 if (CHIP_REV_IS_SLOW(bp))
9856 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9857 else
9858 offset = BNX2X_MAX_MULTICAST*(1 + port);
9859
9860 config->hdr.length_6b = i;
9861 config->hdr.offset = offset;
9862 config->hdr.client_id = BP_CL_ID(bp);
9863 config->hdr.reserved1 = 0;
9864
9865 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9866 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9867 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9868 0);
9869 } else { /* E1H */
9870 /* Accept one or more multicasts */
9871 struct dev_mc_list *mclist;
9872 u32 mc_filter[MC_HASH_SIZE];
9873 u32 crc, bit, regidx;
9874 int i;
9875
9876 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9877
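			/* 256-bin hash: the top byte of the little-endian
			 * CRC32C selects one filter bit - e.g. a hypothetical
			 * crc with bits 31..24 == 0xd3 gives bit 211, i.e.
			 * mc_filter[211 >> 5 = 6] |= 1 << (211 & 0x1f = 19) */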
9878 for (i = 0, mclist = dev->mc_list;
9879 mclist && (i < dev->mc_count);
9880 i++, mclist = mclist->next) {
9881
9882 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9883 mclist->dmi_addr);
9884
9885 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9886 bit = (crc >> 24) & 0xff;
9887 regidx = bit >> 5;
9888 bit &= 0x1f;
9889 mc_filter[regidx] |= (1 << bit);
9890 }
9891
9892 for (i = 0; i < MC_HASH_SIZE; i++)
9893 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9894 mc_filter[i]);
9895 }
9896 }
9897
9898 bp->rx_mode = rx_mode;
9899 bnx2x_set_storm_rx_mode(bp);
9900}
9901
9902/* called with rtnl_lock */
9903static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9904{
9905 struct sockaddr *addr = p;
9906 struct bnx2x *bp = netdev_priv(dev);
9907
34f80b04 9908 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9909 return -EINVAL;
9910
9911 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9912 if (netif_running(dev)) {
9913 if (CHIP_IS_E1(bp))
3101c2bc 9914 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9915 else
3101c2bc 9916 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9917 }
9918
9919 return 0;
9920}
9921
c18487ee 9922/* called with rtnl_lock */
9923static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9924{
9925 struct mii_ioctl_data *data = if_mii(ifr);
9926 struct bnx2x *bp = netdev_priv(dev);
3196a88a 9927 int port = BP_PORT(bp);
9928 int err;
9929
9930 switch (cmd) {
9931 case SIOCGMIIPHY:
34f80b04 9932 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9933
c14423fe 9934 /* fallthrough */
c18487ee 9935
a2fbb9ea 9936 case SIOCGMIIREG: {
c18487ee 9937 u16 mii_regval;
a2fbb9ea 9938
9939 if (!netif_running(dev))
9940 return -EAGAIN;
a2fbb9ea 9941
34f80b04 9942 mutex_lock(&bp->port.phy_mutex);
3196a88a 9943 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9944 DEFAULT_PHY_DEV_ADDR,
9945 (data->reg_num & 0x1f), &mii_regval);
9946 data->val_out = mii_regval;
34f80b04 9947 mutex_unlock(&bp->port.phy_mutex);
9948 return err;
9949 }
9950
9951 case SIOCSMIIREG:
9952 if (!capable(CAP_NET_ADMIN))
9953 return -EPERM;
9954
9955 if (!netif_running(dev))
9956 return -EAGAIN;
9957
34f80b04 9958 mutex_lock(&bp->port.phy_mutex);
3196a88a 9959 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9960 DEFAULT_PHY_DEV_ADDR,
9961 (data->reg_num & 0x1f), data->val_in);
34f80b04 9962 mutex_unlock(&bp->port.phy_mutex);
9963 return err;
9964
9965 default:
9966 /* do nothing */
9967 break;
9968 }
9969
9970 return -EOPNOTSUPP;
9971}
9972
34f80b04 9973/* called with rtnl_lock */
9974static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9975{
9976 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9977 int rc = 0;
9978
9979 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9980 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9981 return -EINVAL;
9982
9983 /* This does not race with packet allocation
c14423fe 9984 * because the actual alloc size is
9985 * only updated as part of load
9986 */
9987 dev->mtu = new_mtu;
9988
9989 if (netif_running(dev)) {
9990 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9991 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9992 }
9993
9994 return rc;
9995}
9996
9997static void bnx2x_tx_timeout(struct net_device *dev)
9998{
9999 struct bnx2x *bp = netdev_priv(dev);
10000
10001#ifdef BNX2X_STOP_ON_ERROR
10002 if (!bp->panic)
10003 bnx2x_panic();
10004#endif
 10005	/* This allows the netif to be shut down gracefully before resetting */
10006 schedule_work(&bp->reset_task);
10007}
10008
10009#ifdef BCM_VLAN
34f80b04 10010/* called with rtnl_lock */
10011static void bnx2x_vlan_rx_register(struct net_device *dev,
10012 struct vlan_group *vlgrp)
10013{
10014 struct bnx2x *bp = netdev_priv(dev);
10015
10016 bp->vlgrp = vlgrp;
10017 if (netif_running(dev))
49d66772 10018 bnx2x_set_client_config(bp);
a2fbb9ea 10019}
34f80b04 10020
10021#endif
10022
10023#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10024static void poll_bnx2x(struct net_device *dev)
10025{
10026 struct bnx2x *bp = netdev_priv(dev);
10027
10028 disable_irq(bp->pdev->irq);
10029 bnx2x_interrupt(bp->pdev->irq, dev);
10030 enable_irq(bp->pdev->irq);
10031}
10032#endif
10033
10034static const struct net_device_ops bnx2x_netdev_ops = {
10035 .ndo_open = bnx2x_open,
10036 .ndo_stop = bnx2x_close,
10037 .ndo_start_xmit = bnx2x_start_xmit,
10038 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10039 .ndo_set_mac_address = bnx2x_change_mac_addr,
10040 .ndo_validate_addr = eth_validate_addr,
10041 .ndo_do_ioctl = bnx2x_ioctl,
10042 .ndo_change_mtu = bnx2x_change_mtu,
10043 .ndo_tx_timeout = bnx2x_tx_timeout,
10044#ifdef BCM_VLAN
10045 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10046#endif
10047#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10048 .ndo_poll_controller = poll_bnx2x,
10049#endif
10050};
10051
10052
10053static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10054 struct net_device *dev)
10055{
10056 struct bnx2x *bp;
10057 int rc;
10058
10059 SET_NETDEV_DEV(dev, &pdev->dev);
10060 bp = netdev_priv(dev);
10061
10062 bp->dev = dev;
10063 bp->pdev = pdev;
a2fbb9ea 10064 bp->flags = 0;
34f80b04 10065 bp->func = PCI_FUNC(pdev->devfn);
10066
10067 rc = pci_enable_device(pdev);
10068 if (rc) {
10069 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10070 goto err_out;
10071 }
10072
10073 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10074 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10075 " aborting\n");
10076 rc = -ENODEV;
10077 goto err_out_disable;
10078 }
10079
10080 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10081 printk(KERN_ERR PFX "Cannot find second PCI device"
10082 " base address, aborting\n");
10083 rc = -ENODEV;
10084 goto err_out_disable;
10085 }
10086
10087 if (atomic_read(&pdev->enable_cnt) == 1) {
10088 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10089 if (rc) {
10090 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10091 " aborting\n");
10092 goto err_out_disable;
10093 }
a2fbb9ea 10094
10095 pci_set_master(pdev);
10096 pci_save_state(pdev);
10097 }
10098
10099 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10100 if (bp->pm_cap == 0) {
10101 printk(KERN_ERR PFX "Cannot find power management"
10102 " capability, aborting\n");
10103 rc = -EIO;
10104 goto err_out_release;
10105 }
10106
10107 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10108 if (bp->pcie_cap == 0) {
10109 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10110 " aborting\n");
10111 rc = -EIO;
10112 goto err_out_release;
10113 }
10114
10115 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10116 bp->flags |= USING_DAC_FLAG;
10117 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10118 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10119 " failed, aborting\n");
10120 rc = -EIO;
10121 goto err_out_release;
10122 }
10123
10124 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10125 printk(KERN_ERR PFX "System does not support DMA,"
10126 " aborting\n");
10127 rc = -EIO;
10128 goto err_out_release;
10129 }
10130
34f80b04
EG
10131 dev->mem_start = pci_resource_start(pdev, 0);
10132 dev->base_addr = dev->mem_start;
10133 dev->mem_end = pci_resource_end(pdev, 0);
10134
10135 dev->irq = pdev->irq;
10136
275f165f 10137 bp->regview = pci_ioremap_bar(pdev, 0);
10138 if (!bp->regview) {
10139 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10140 rc = -ENOMEM;
10141 goto err_out_release;
10142 }
10143
10144 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10145 min_t(u64, BNX2X_DB_SIZE,
10146 pci_resource_len(pdev, 2)));
10147 if (!bp->doorbells) {
10148 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10149 rc = -ENOMEM;
10150 goto err_out_unmap;
10151 }
10152
10153 bnx2x_set_power_state(bp, PCI_D0);
10154
10155 /* clean indirect addresses */
10156 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10157 PCICFG_VENDOR_ID_OFFSET);
10158 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10159 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10160 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10161 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10162
34f80b04 10163 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10164
c64213cd 10165 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10166 dev->ethtool_ops = &bnx2x_ethtool_ops;
10167 dev->features |= NETIF_F_SG;
10168 dev->features |= NETIF_F_HW_CSUM;
10169 if (bp->flags & USING_DAC_FLAG)
10170 dev->features |= NETIF_F_HIGHDMA;
10171#ifdef BCM_VLAN
10172 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10173#endif
10174 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10175 dev->features |= NETIF_F_TSO6;
10176
10177 return 0;
10178
10179err_out_unmap:
10180 if (bp->regview) {
10181 iounmap(bp->regview);
10182 bp->regview = NULL;
10183 }
10184 if (bp->doorbells) {
10185 iounmap(bp->doorbells);
10186 bp->doorbells = NULL;
10187 }
10188
10189err_out_release:
10190 if (atomic_read(&pdev->enable_cnt) == 1)
10191 pci_release_regions(pdev);
10192
10193err_out_disable:
10194 pci_disable_device(pdev);
10195 pci_set_drvdata(pdev, NULL);
10196
10197err_out:
10198 return rc;
10199}
10200
10201static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10202{
10203 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10204
10205 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10206 return val;
10207}
10208
10209/* return value of 1=2.5GHz 2=5GHz */
10210static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10211{
10212 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10213
10214 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10215 return val;
10216}
10217
10218static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10219 const struct pci_device_id *ent)
10220{
10221 static int version_printed;
10222 struct net_device *dev = NULL;
10223 struct bnx2x *bp;
25047950 10224 int rc;
10225
10226 if (version_printed++ == 0)
10227 printk(KERN_INFO "%s", version);
10228
10229 /* dev zeroed in init_etherdev */
10230 dev = alloc_etherdev(sizeof(*bp));
10231 if (!dev) {
10232 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10233 return -ENOMEM;
34f80b04 10234 }
a2fbb9ea 10235
10236 bp = netdev_priv(dev);
10237 bp->msglevel = debug;
10238
34f80b04 10239 rc = bnx2x_init_dev(pdev, dev);
10240 if (rc < 0) {
10241 free_netdev(dev);
10242 return rc;
10243 }
10244
10245 rc = register_netdev(dev);
10246 if (rc) {
c14423fe 10247 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10248 goto init_one_exit;
a2fbb9ea
ET
10249 }
10250
10251 pci_set_drvdata(pdev, dev);
10252
10253 rc = bnx2x_init_bp(bp);
10254 if (rc) {
10255 unregister_netdev(dev);
10256 goto init_one_exit;
10257 }
10258
10259 netif_carrier_off(dev);
10260
34f80b04 10261 bp->common.name = board_info[ent->driver_data].name;
25047950 10262 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10263 " IRQ %d, ", dev->name, bp->common.name,
10264 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10265 bnx2x_get_pcie_width(bp),
10266 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10267 dev->base_addr, bp->pdev->irq);
e174961c 10268 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10269 return 0;
10270
10271init_one_exit:
10272 if (bp->regview)
10273 iounmap(bp->regview);
10274
10275 if (bp->doorbells)
10276 iounmap(bp->doorbells);
10277
10278 free_netdev(dev);
10279
10280 if (atomic_read(&pdev->enable_cnt) == 1)
10281 pci_release_regions(pdev);
10282
10283 pci_disable_device(pdev);
10284 pci_set_drvdata(pdev, NULL);
10285
10286 return rc;
10287}
10288
10289static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10290{
10291 struct net_device *dev = pci_get_drvdata(pdev);
10292 struct bnx2x *bp;
10293
10294 if (!dev) {
10295 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10296 return;
10297 }
228241eb 10298 bp = netdev_priv(dev);
a2fbb9ea 10299
10300 unregister_netdev(dev);
10301
10302 if (bp->regview)
10303 iounmap(bp->regview);
10304
10305 if (bp->doorbells)
10306 iounmap(bp->doorbells);
10307
10308 free_netdev(dev);
10309
10310 if (atomic_read(&pdev->enable_cnt) == 1)
10311 pci_release_regions(pdev);
10312
a2fbb9ea
ET
10313 pci_disable_device(pdev);
10314 pci_set_drvdata(pdev, NULL);
10315}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
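
/*
 * Editor's note: suspend saves PCI config space unconditionally but only
 * unloads and powers down the NIC if the interface was running;
 * bnx2x_resume() below mirrors the same steps in reverse.
 */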

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
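
/*
 * Editor's note: this EEH variant of the unload path tears down host-side
 * state only (timers, IRQs, SKBs, DMA memory) and skips the firmware
 * handshake performed by bnx2x_nic_unload(), since the PCI channel may be
 * frozen after the error.
 */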

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
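
/*
 * Editor's note: a shmem_base outside the [0xA0000, 0xC0000) window is
 * taken to mean the MCP firmware is not running, and the driver falls back
 * to NO_MCP_FLAG operation; otherwise the validity map is checked for both
 * the DEV_INFO and MB signatures and the firmware sequence number is
 * re-read from the mailbox header.
 */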

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
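
/*
 * Editor's note: the PCI core drives these hooks in order during EEH/AER
 * recovery: error_detected() quiesces the device and requests a reset,
 * slot_reset() re-enables and restores it, and resume() reloads the NIC
 * and reattaches the interface.
 */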

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

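/*
 * Editor's note: the single-threaded workqueue is created before the PCI
 * driver registers, so it already exists by the time probe() or the error
 * handlers can schedule work, and it is destroyed only after the driver
 * has been unregistered.
 */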
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);