]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
bnx2x: Flow control updated before reporting the link
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
f1410647 3 * Copyright (c) 2007-2008 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
a2fbb9ea
ET
43#endif
44#include <net/ip.h>
45#include <net/tcp.h>
46#include <net/checksum.h>
34f80b04 47#include <net/ip6_checksum.h>
a2fbb9ea
ET
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
34f80b04 50#include <linux/crc32c.h>
a2fbb9ea
ET
51#include <linux/prefetch.h>
52#include <linux/zlib.h>
a2fbb9ea
ET
53#include <linux/io.h>
54
55#include "bnx2x_reg.h"
56#include "bnx2x_fw_defs.h"
57#include "bnx2x_hsi.h"
c18487ee 58#include "bnx2x_link.h"
a2fbb9ea
ET
59#include "bnx2x.h"
60#include "bnx2x_init.h"
61
ca8eac55
EG
62#define DRV_MODULE_VERSION "1.45.23"
63#define DRV_MODULE_RELDATE "2008/11/03"
34f80b04 64#define BNX2X_BC_VER 0x040200
a2fbb9ea 65
34f80b04
EG
66/* Time in jiffies before concluding the transmitter is hung */
67#define TX_TIMEOUT (5*HZ)
a2fbb9ea 68
53a10565 69static char version[] __devinitdata =
34f80b04 70 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
71 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
24e3fcef 73MODULE_AUTHOR("Eliezer Tamir");
a2fbb9ea
ET
74MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
75MODULE_LICENSE("GPL");
76MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 77
19680c48 78static int disable_tpa;
a2fbb9ea
ET
79static int use_inta;
80static int poll;
a2fbb9ea 81static int debug;
34f80b04 82static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea
ET
83static int use_multi;
84
19680c48 85module_param(disable_tpa, int, 0);
a2fbb9ea
ET
86module_param(use_inta, int, 0);
87module_param(poll, int, 0);
a2fbb9ea 88module_param(debug, int, 0);
19680c48 89MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
a2fbb9ea
ET
90MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
91MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 92MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea
ET
93
94#ifdef BNX2X_MULTI
95module_param(use_multi, int, 0);
96MODULE_PARM_DESC(use_multi, "use per-CPU queues");
97#endif
1cf167f2 98static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
99
100enum bnx2x_board_type {
101 BCM57710 = 0,
34f80b04
EG
102 BCM57711 = 1,
103 BCM57711E = 2,
a2fbb9ea
ET
104};
105
34f80b04 106/* indexed by board_type, above */
53a10565 107static struct {
a2fbb9ea
ET
108 char *name;
109} board_info[] __devinitdata = {
34f80b04
EG
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
113};
114
34f80b04 115
a2fbb9ea
ET
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
123 { 0 }
124};
125
126MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128/****************************************************************************
129* General service functions
130****************************************************************************/
131
132/* used only at init
133 * locking is done by mcp
134 */
135static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136{
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
141}
142
a2fbb9ea
ET
143static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144{
145 u32 val;
146
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
151
152 return val;
153}
a2fbb9ea
ET
154
155static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160};
161
162/* copy command into DMAE command memory and set DMAE command go */
163static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164 int idx)
165{
166 u32 cmd_offset;
167 int i;
168
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
ad8d3948
EG
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
175 }
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
177}
178
ad8d3948
EG
179void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 u32 len32)
a2fbb9ea 181{
ad8d3948 182 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
184 int cnt = 200;
185
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192 return;
193 }
194
195 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
196
197 memset(dmae, 0, sizeof(struct dmae_command));
198
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202#ifdef __BIG_ENDIAN
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
204#else
205 DMAE_CMD_ENDIANITY_DW_SWAP |
206#endif
34f80b04
EG
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
213 dmae->len = len32;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 216 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 217
ad8d3948 218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
228
229 *wb_comp = 0;
230
34f80b04 231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
232
233 udelay(5);
ad8d3948
EG
234
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
ad8d3948 238 if (!cnt) {
a2fbb9ea
ET
239 BNX2X_ERR("dmae timeout!\n");
240 break;
241 }
ad8d3948 242 cnt--;
12469401
YG
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
245 msleep(100);
246 else
247 udelay(5);
a2fbb9ea 248 }
ad8d3948
EG
249
250 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
251}
252
c18487ee 253void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 254{
ad8d3948 255 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 256 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
257 int cnt = 200;
258
259 if (!bp->dmae_ready) {
260 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 int i;
262
263 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
264 " using indirect\n", src_addr, len32);
265 for (i = 0; i < len32; i++)
266 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
267 return;
268 }
269
270 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
271
272 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273 memset(dmae, 0, sizeof(struct dmae_command));
274
275 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
278#ifdef __BIG_ENDIAN
279 DMAE_CMD_ENDIANITY_B_DW_SWAP |
280#else
281 DMAE_CMD_ENDIANITY_DW_SWAP |
282#endif
34f80b04
EG
283 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
285 dmae->src_addr_lo = src_addr >> 2;
286 dmae->src_addr_hi = 0;
287 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
289 dmae->len = len32;
290 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 292 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 293
ad8d3948 294 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
295 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
296 "dst_addr [%x:%08x (%08x)]\n"
297 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
298 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
301
302 *wb_comp = 0;
303
34f80b04 304 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
305
306 udelay(5);
ad8d3948
EG
307
308 while (*wb_comp != DMAE_COMP_VAL) {
309
ad8d3948 310 if (!cnt) {
a2fbb9ea
ET
311 BNX2X_ERR("dmae timeout!\n");
312 break;
313 }
ad8d3948 314 cnt--;
12469401
YG
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
317 msleep(100);
318 else
319 udelay(5);
a2fbb9ea 320 }
ad8d3948 321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
324
325 mutex_unlock(&bp->dmae_mutex);
326}
327
328/* used only for slowpath so not inlined */
329static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330{
331 u32 wb_write[2];
332
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 336}
a2fbb9ea 337
ad8d3948
EG
338#ifdef USE_WB_RD
339static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
340{
341 u32 wb_data[2];
342
343 REG_RD_DMAE(bp, reg, wb_data, 2);
344
345 return HILO_U64(wb_data[0], wb_data[1]);
346}
347#endif
348
a2fbb9ea
ET
349static int bnx2x_mc_assert(struct bnx2x *bp)
350{
a2fbb9ea 351 char last_idx;
34f80b04
EG
352 int i, rc = 0;
353 u32 row0, row1, row2, row3;
354
355 /* XSTORM */
356 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357 XSTORM_ASSERT_LIST_INDEX_OFFSET);
358 if (last_idx)
359 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360
361 /* print the asserts */
362 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363
364 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i));
366 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372
373 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375 " 0x%08x 0x%08x 0x%08x\n",
376 i, row3, row2, row1, row0);
377 rc++;
378 } else {
379 break;
380 }
381 }
382
383 /* TSTORM */
384 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385 TSTORM_ASSERT_LIST_INDEX_OFFSET);
386 if (last_idx)
387 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388
389 /* print the asserts */
390 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391
392 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i));
394 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400
401 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403 " 0x%08x 0x%08x 0x%08x\n",
404 i, row3, row2, row1, row0);
405 rc++;
406 } else {
407 break;
408 }
409 }
410
411 /* CSTORM */
412 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413 CSTORM_ASSERT_LIST_INDEX_OFFSET);
414 if (last_idx)
415 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416
417 /* print the asserts */
418 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419
420 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i));
422 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428
429 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431 " 0x%08x 0x%08x 0x%08x\n",
432 i, row3, row2, row1, row0);
433 rc++;
434 } else {
435 break;
436 }
437 }
438
439 /* USTORM */
440 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441 USTORM_ASSERT_LIST_INDEX_OFFSET);
442 if (last_idx)
443 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444
445 /* print the asserts */
446 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447
448 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i));
450 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 4);
452 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 8);
454 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 12);
456
457 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459 " 0x%08x 0x%08x 0x%08x\n",
460 i, row3, row2, row1, row0);
461 rc++;
462 } else {
463 break;
a2fbb9ea
ET
464 }
465 }
34f80b04 466
a2fbb9ea
ET
467 return rc;
468}
c14423fe 469
a2fbb9ea
ET
470static void bnx2x_fw_dump(struct bnx2x *bp)
471{
472 u32 mark, offset;
473 u32 data[9];
474 int word;
475
476 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
477 mark = ((mark + 0x3) & ~0x3);
478 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
479
480 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481 for (word = 0; word < 8; word++)
482 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 offset + 4*word));
484 data[8] = 0x0;
49d66772 485 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
486 }
487 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488 for (word = 0; word < 8; word++)
489 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 offset + 4*word));
491 data[8] = 0x0;
49d66772 492 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
493 }
494 printk("\n" KERN_ERR PFX "end of fw dump\n");
495}
496
497static void bnx2x_panic_dump(struct bnx2x *bp)
498{
499 int i;
500 u16 j, start, end;
501
66e855f3
YG
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504
a2fbb9ea
ET
505 BNX2X_ERR("begin crash dump -----------------\n");
506
507 for_each_queue(bp, i) {
508 struct bnx2x_fastpath *fp = &bp->fp[i];
509 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510
511 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 512 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 513 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04 514 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
66e855f3
YG
515 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
516 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
517 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
518 fp->rx_bd_prod, fp->rx_bd_cons,
519 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
520 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
521 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
522 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
523 " *sb_u_idx(%x) bd data(%x,%x)\n",
524 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
525 fp->status_blk->c_status_block.status_block_index,
526 fp->fp_u_idx,
527 fp->status_blk->u_status_block.status_block_index,
528 hw_prods->packets_prod, hw_prods->bds_prod);
a2fbb9ea
ET
529
530 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
531 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
532 for (j = start; j < end; j++) {
533 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
534
535 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
536 sw_bd->skb, sw_bd->first_bd);
537 }
538
539 start = TX_BD(fp->tx_bd_cons - 10);
540 end = TX_BD(fp->tx_bd_cons + 254);
541 for (j = start; j < end; j++) {
542 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
543
544 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
545 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 }
547
548 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
549 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
550 for (j = start; j < end; j++) {
551 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
552 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
553
554 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 555 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
556 }
557
3196a88a
EG
558 start = RX_SGE(fp->rx_sge_prod);
559 end = RX_SGE(fp->last_max_sge);
7a9b2557
VZ
560 for (j = start; j < end; j++) {
561 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
562 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
563
564 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
565 j, rx_sge[1], rx_sge[0], sw_page->page);
566 }
567
a2fbb9ea
ET
568 start = RCQ_BD(fp->rx_comp_cons - 10);
569 end = RCQ_BD(fp->rx_comp_cons + 503);
570 for (j = start; j < end; j++) {
571 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
572
573 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
574 j, cqe[0], cqe[1], cqe[2], cqe[3]);
575 }
576 }
577
49d66772
ET
578 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
579 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 580 " spq_prod_idx(%u)\n",
49d66772 581 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
582 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
583
34f80b04 584 bnx2x_fw_dump(bp);
a2fbb9ea
ET
585 bnx2x_mc_assert(bp);
586 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
587}
588
615f8fd9 589static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 590{
34f80b04 591 int port = BP_PORT(bp);
a2fbb9ea
ET
592 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
593 u32 val = REG_RD(bp, addr);
594 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595
596 if (msix) {
597 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
600 } else {
601 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 602 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
603 HC_CONFIG_0_REG_INT_LINE_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 605
615f8fd9
ET
606 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
607 val, port, addr, msix);
608
609 REG_WR(bp, addr, val);
610
a2fbb9ea
ET
611 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 }
613
615f8fd9 614 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
a2fbb9ea
ET
615 val, port, addr, msix);
616
617 REG_WR(bp, addr, val);
34f80b04
EG
618
619 if (CHIP_IS_E1H(bp)) {
620 /* init leading/trailing edge */
621 if (IS_E1HMF(bp)) {
622 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
623 if (bp->port.pmf)
624 /* enable nig attention */
625 val |= 0x0100;
626 } else
627 val = 0xffff;
628
629 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
630 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
631 }
a2fbb9ea
ET
632}
633
615f8fd9 634static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 635{
34f80b04 636 int port = BP_PORT(bp);
a2fbb9ea
ET
637 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
638 u32 val = REG_RD(bp, addr);
639
640 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
641 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_INT_LINE_EN_0 |
643 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644
645 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 val, port, addr);
647
648 REG_WR(bp, addr, val);
649 if (REG_RD(bp, addr) != val)
650 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651}
652
f8ef6e44 653static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 654{
a2fbb9ea
ET
655 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 int i;
657
34f80b04 658 /* disable interrupt handling */
a2fbb9ea 659 atomic_inc(&bp->intr_sem);
f8ef6e44
YG
660 if (disable_hw)
661 /* prevent the HW from sending interrupts */
662 bnx2x_int_disable(bp);
a2fbb9ea
ET
663
664 /* make sure all ISRs are done */
665 if (msix) {
666 for_each_queue(bp, i)
667 synchronize_irq(bp->msix_table[i].vector);
668
669 /* one more for the Slow Path IRQ */
670 synchronize_irq(bp->msix_table[i].vector);
671 } else
672 synchronize_irq(bp->pdev->irq);
673
674 /* make sure sp_task is not running */
1cf167f2
EG
675 cancel_delayed_work(&bp->sp_task);
676 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
677}
678
34f80b04 679/* fast path */
a2fbb9ea
ET
680
681/*
34f80b04 682 * General service functions
a2fbb9ea
ET
683 */
684
34f80b04 685static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
686 u8 storm, u16 index, u8 op, u8 update)
687{
5c862848
EG
688 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
689 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
690 struct igu_ack_register igu_ack;
691
692 igu_ack.status_block_index = index;
693 igu_ack.sb_id_and_flags =
34f80b04 694 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
695 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
696 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
697 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
698
5c862848
EG
699 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
700 (*(u32 *)&igu_ack), hc_addr);
701 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
a2fbb9ea
ET
702}
703
704static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
705{
706 struct host_status_block *fpsb = fp->status_blk;
707 u16 rc = 0;
708
709 barrier(); /* status block is written to by the chip */
710 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
711 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 rc |= 1;
713 }
714 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
715 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
716 rc |= 2;
717 }
718 return rc;
719}
720
a2fbb9ea
ET
721static u16 bnx2x_ack_int(struct bnx2x *bp)
722{
5c862848
EG
723 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
724 COMMAND_REG_SIMD_MASK);
725 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 726
5c862848
EG
727 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
728 result, hc_addr);
a2fbb9ea 729
a2fbb9ea
ET
730 return result;
731}
732
733
734/*
735 * fast path service functions
736 */
737
738/* free skb in the packet ring at pos idx
739 * return idx of last bd freed
740 */
741static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
742 u16 idx)
743{
744 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
745 struct eth_tx_bd *tx_bd;
746 struct sk_buff *skb = tx_buf->skb;
34f80b04 747 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
748 int nbd;
749
750 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
751 idx, tx_buf, skb);
752
753 /* unmap first bd */
754 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
755 tx_bd = &fp->tx_desc_ring[bd_idx];
756 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
757 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
758
759 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 760 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
761#ifdef BNX2X_STOP_ON_ERROR
762 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 763 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
764 bnx2x_panic();
765 }
766#endif
767
768 /* Skip a parse bd and the TSO split header bd
769 since they have no mapping */
770 if (nbd)
771 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
772
773 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
774 ETH_TX_BD_FLAGS_TCP_CSUM |
775 ETH_TX_BD_FLAGS_SW_LSO)) {
776 if (--nbd)
777 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
778 tx_bd = &fp->tx_desc_ring[bd_idx];
779 /* is this a TSO split header bd? */
780 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
781 if (--nbd)
782 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
783 }
784 }
785
786 /* now free frags */
787 while (nbd > 0) {
788
789 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
790 tx_bd = &fp->tx_desc_ring[bd_idx];
791 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
792 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
793 if (--nbd)
794 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
795 }
796
797 /* release skb */
53e5e96e 798 WARN_ON(!skb);
a2fbb9ea
ET
799 dev_kfree_skb(skb);
800 tx_buf->first_bd = 0;
801 tx_buf->skb = NULL;
802
34f80b04 803 return new_cons;
a2fbb9ea
ET
804}
805
34f80b04 806static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 807{
34f80b04
EG
808 s16 used;
809 u16 prod;
810 u16 cons;
a2fbb9ea 811
34f80b04 812 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
813 prod = fp->tx_bd_prod;
814 cons = fp->tx_bd_cons;
815
34f80b04
EG
816 /* NUM_TX_RINGS = number of "next-page" entries
817 It will be used as a threshold */
818 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 819
34f80b04 820#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
821 WARN_ON(used < 0);
822 WARN_ON(used > fp->bp->tx_ring_size);
823 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 824#endif
a2fbb9ea 825
34f80b04 826 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
827}
828
829static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
830{
831 struct bnx2x *bp = fp->bp;
832 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
833 int done = 0;
834
835#ifdef BNX2X_STOP_ON_ERROR
836 if (unlikely(bp->panic))
837 return;
838#endif
839
840 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
841 sw_cons = fp->tx_pkt_cons;
842
843 while (sw_cons != hw_cons) {
844 u16 pkt_cons;
845
846 pkt_cons = TX_BD(sw_cons);
847
848 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
849
34f80b04 850 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
851 hw_cons, sw_cons, pkt_cons);
852
34f80b04 853/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
854 rmb();
855 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
856 }
857*/
858 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
859 sw_cons++;
860 done++;
861
862 if (done == work)
863 break;
864 }
865
866 fp->tx_pkt_cons = sw_cons;
867 fp->tx_bd_cons = bd_cons;
868
869 /* Need to make the tx_cons update visible to start_xmit()
870 * before checking for netif_queue_stopped(). Without the
871 * memory barrier, there is a small possibility that start_xmit()
872 * will miss it and cause the queue to be stopped forever.
873 */
874 smp_mb();
875
876 /* TBD need a thresh? */
877 if (unlikely(netif_queue_stopped(bp->dev))) {
878
879 netif_tx_lock(bp->dev);
880
881 if (netif_queue_stopped(bp->dev) &&
da5a662a 882 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea
ET
883 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
884 netif_wake_queue(bp->dev);
885
886 netif_tx_unlock(bp->dev);
a2fbb9ea
ET
887 }
888}
889
3196a88a 890
a2fbb9ea
ET
891static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
892 union eth_rx_cqe *rr_cqe)
893{
894 struct bnx2x *bp = fp->bp;
895 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
896 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
897
34f80b04 898 DP(BNX2X_MSG_SP,
a2fbb9ea 899 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
900 FP_IDX(fp), cid, command, bp->state,
901 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
902
903 bp->spq_left++;
904
34f80b04 905 if (FP_IDX(fp)) {
a2fbb9ea
ET
906 switch (command | fp->state) {
907 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
908 BNX2X_FP_STATE_OPENING):
909 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
910 cid);
911 fp->state = BNX2X_FP_STATE_OPEN;
912 break;
913
914 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
915 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
916 cid);
917 fp->state = BNX2X_FP_STATE_HALTED;
918 break;
919
920 default:
34f80b04
EG
921 BNX2X_ERR("unexpected MC reply (%d) "
922 "fp->state is %x\n", command, fp->state);
923 break;
a2fbb9ea 924 }
34f80b04 925 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
926 return;
927 }
c14423fe 928
a2fbb9ea
ET
929 switch (command | bp->state) {
930 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
931 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
932 bp->state = BNX2X_STATE_OPEN;
933 break;
934
935 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
936 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
937 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
938 fp->state = BNX2X_FP_STATE_HALTED;
939 break;
940
a2fbb9ea 941 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 942 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 943 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
944 break;
945
3196a88a 946
a2fbb9ea 947 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 948 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 949 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 950 bp->set_mac_pending = 0;
a2fbb9ea
ET
951 break;
952
49d66772 953 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 954 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
955 break;
956
a2fbb9ea 957 default:
34f80b04 958 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 959 command, bp->state);
34f80b04 960 break;
a2fbb9ea 961 }
34f80b04 962 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
963}
964
7a9b2557
VZ
965static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
966 struct bnx2x_fastpath *fp, u16 index)
967{
968 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
969 struct page *page = sw_buf->page;
970 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
971
972 /* Skip "next page" elements */
973 if (!page)
974 return;
975
976 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 977 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
978 __free_pages(page, PAGES_PER_SGE_SHIFT);
979
980 sw_buf->page = NULL;
981 sge->addr_hi = 0;
982 sge->addr_lo = 0;
983}
984
985static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
986 struct bnx2x_fastpath *fp, int last)
987{
988 int i;
989
990 for (i = 0; i < last; i++)
991 bnx2x_free_rx_sge(bp, fp, i);
992}
993
994static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
995 struct bnx2x_fastpath *fp, u16 index)
996{
997 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
998 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
999 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1000 dma_addr_t mapping;
1001
1002 if (unlikely(page == NULL))
1003 return -ENOMEM;
1004
4f40f2cb 1005 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1006 PCI_DMA_FROMDEVICE);
8d8bb39b 1007 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1008 __free_pages(page, PAGES_PER_SGE_SHIFT);
1009 return -ENOMEM;
1010 }
1011
1012 sw_buf->page = page;
1013 pci_unmap_addr_set(sw_buf, mapping, mapping);
1014
1015 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1016 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1017
1018 return 0;
1019}
1020
a2fbb9ea
ET
1021static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1023{
1024 struct sk_buff *skb;
1025 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1026 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1027 dma_addr_t mapping;
1028
1029 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1030 if (unlikely(skb == NULL))
1031 return -ENOMEM;
1032
437cf2f1 1033 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1034 PCI_DMA_FROMDEVICE);
8d8bb39b 1035 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1036 dev_kfree_skb(skb);
1037 return -ENOMEM;
1038 }
1039
1040 rx_buf->skb = skb;
1041 pci_unmap_addr_set(rx_buf, mapping, mapping);
1042
1043 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1044 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1045
1046 return 0;
1047}
1048
1049/* note that we are not allocating a new skb,
1050 * we are just moving one from cons to prod
1051 * we are not creating a new mapping,
1052 * so there is no need to check for dma_mapping_error().
1053 */
1054static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1055 struct sk_buff *skb, u16 cons, u16 prod)
1056{
1057 struct bnx2x *bp = fp->bp;
1058 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1059 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1060 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1061 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1062
1063 pci_dma_sync_single_for_device(bp->pdev,
1064 pci_unmap_addr(cons_rx_buf, mapping),
1065 bp->rx_offset + RX_COPY_THRESH,
1066 PCI_DMA_FROMDEVICE);
1067
1068 prod_rx_buf->skb = cons_rx_buf->skb;
1069 pci_unmap_addr_set(prod_rx_buf, mapping,
1070 pci_unmap_addr(cons_rx_buf, mapping));
1071 *prod_bd = *cons_bd;
1072}
1073
7a9b2557
VZ
1074static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1075 u16 idx)
1076{
1077 u16 last_max = fp->last_max_sge;
1078
1079 if (SUB_S16(idx, last_max) > 0)
1080 fp->last_max_sge = idx;
1081}
1082
1083static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1084{
1085 int i, j;
1086
1087 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1088 int idx = RX_SGE_CNT * i - 1;
1089
1090 for (j = 0; j < 2; j++) {
1091 SGE_MASK_CLEAR_BIT(fp, idx);
1092 idx--;
1093 }
1094 }
1095}
1096
1097static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1098 struct eth_fast_path_rx_cqe *fp_cqe)
1099{
1100 struct bnx2x *bp = fp->bp;
4f40f2cb 1101 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1102 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1103 SGE_PAGE_SHIFT;
7a9b2557
VZ
1104 u16 last_max, last_elem, first_elem;
1105 u16 delta = 0;
1106 u16 i;
1107
1108 if (!sge_len)
1109 return;
1110
1111 /* First mark all used pages */
1112 for (i = 0; i < sge_len; i++)
1113 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1114
1115 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1116 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1117
1118 /* Here we assume that the last SGE index is the biggest */
1119 prefetch((void *)(fp->sge_mask));
1120 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1121
1122 last_max = RX_SGE(fp->last_max_sge);
1123 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1124 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1125
1126 /* If ring is not full */
1127 if (last_elem + 1 != first_elem)
1128 last_elem++;
1129
1130 /* Now update the prod */
1131 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1132 if (likely(fp->sge_mask[i]))
1133 break;
1134
1135 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1136 delta += RX_SGE_MASK_ELEM_SZ;
1137 }
1138
1139 if (delta > 0) {
1140 fp->rx_sge_prod += delta;
1141 /* clear page-end entries */
1142 bnx2x_clear_sge_mask_next_elems(fp);
1143 }
1144
1145 DP(NETIF_MSG_RX_STATUS,
1146 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1147 fp->last_max_sge, fp->rx_sge_prod);
1148}
1149
1150static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1151{
1152 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1153 memset(fp->sge_mask, 0xff,
1154 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1155
33471629
EG
1156 /* Clear the two last indices in the page to 1:
1157 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1158 hence will never be indicated and should be removed from
1159 the calculations. */
1160 bnx2x_clear_sge_mask_next_elems(fp);
1161}
1162
1163static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1164 struct sk_buff *skb, u16 cons, u16 prod)
1165{
1166 struct bnx2x *bp = fp->bp;
1167 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1168 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1169 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1170 dma_addr_t mapping;
1171
1172 /* move empty skb from pool to prod and map it */
1173 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1174 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1175 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1176 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1177
1178 /* move partial skb from cons to pool (don't unmap yet) */
1179 fp->tpa_pool[queue] = *cons_rx_buf;
1180
1181 /* mark bin state as start - print error if current state != stop */
1182 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1183 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1184
1185 fp->tpa_state[queue] = BNX2X_TPA_START;
1186
1187 /* point prod_bd to new skb */
1188 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1189 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1190
1191#ifdef BNX2X_STOP_ON_ERROR
1192 fp->tpa_queue_used |= (1 << queue);
1193#ifdef __powerpc64__
1194 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1195#else
1196 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1197#endif
1198 fp->tpa_queue_used);
1199#endif
1200}
1201
1202static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1203 struct sk_buff *skb,
1204 struct eth_fast_path_rx_cqe *fp_cqe,
1205 u16 cqe_idx)
1206{
1207 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1208 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1209 u32 i, frag_len, frag_size, pages;
1210 int err;
1211 int j;
1212
1213 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1214 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1215
1216 /* This is needed in order to enable forwarding support */
1217 if (frag_size)
4f40f2cb 1218 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1219 max(frag_size, (u32)len_on_bd));
1220
1221#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1222 if (pages >
1223 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1224 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1225 pages, cqe_idx);
1226 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1227 fp_cqe->pkt_len, len_on_bd);
1228 bnx2x_panic();
1229 return -EINVAL;
1230 }
1231#endif
1232
1233 /* Run through the SGL and compose the fragmented skb */
1234 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1235 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1236
1237 /* FW gives the indices of the SGE as if the ring is an array
1238 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1239 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1240 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1241 old_rx_pg = *rx_pg;
1242
1243 /* If we fail to allocate a substitute page, we simply stop
1244 where we are and drop the whole packet */
1245 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1246 if (unlikely(err)) {
66e855f3 1247 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1248 return err;
1249 }
1250
1251 /* Unmap the page as we r going to pass it to the stack */
1252 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1253 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1254
1255 /* Add one frag and update the appropriate fields in the skb */
1256 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1257
1258 skb->data_len += frag_len;
1259 skb->truesize += frag_len;
1260 skb->len += frag_len;
1261
1262 frag_size -= frag_len;
1263 }
1264
1265 return 0;
1266}
1267
1268static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1269 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1270 u16 cqe_idx)
1271{
1272 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1273 struct sk_buff *skb = rx_buf->skb;
1274 /* alloc new skb */
1275 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1276
1277 /* Unmap skb in the pool anyway, as we are going to change
1278 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1279 fails. */
1280 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1281 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1282
7a9b2557 1283 if (likely(new_skb)) {
66e855f3
YG
1284 /* fix ip xsum and give it to the stack */
1285 /* (no need to map the new skb) */
7a9b2557
VZ
1286
1287 prefetch(skb);
1288 prefetch(((char *)(skb)) + 128);
1289
7a9b2557
VZ
1290#ifdef BNX2X_STOP_ON_ERROR
1291 if (pad + len > bp->rx_buf_size) {
1292 BNX2X_ERR("skb_put is about to fail... "
1293 "pad %d len %d rx_buf_size %d\n",
1294 pad, len, bp->rx_buf_size);
1295 bnx2x_panic();
1296 return;
1297 }
1298#endif
1299
1300 skb_reserve(skb, pad);
1301 skb_put(skb, len);
1302
1303 skb->protocol = eth_type_trans(skb, bp->dev);
1304 skb->ip_summed = CHECKSUM_UNNECESSARY;
1305
1306 {
1307 struct iphdr *iph;
1308
1309 iph = (struct iphdr *)skb->data;
1310 iph->check = 0;
1311 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1312 }
1313
1314 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1315 &cqe->fast_path_cqe, cqe_idx)) {
1316#ifdef BCM_VLAN
1317 if ((bp->vlgrp != NULL) &&
1318 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1319 PARSING_FLAGS_VLAN))
1320 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1321 le16_to_cpu(cqe->fast_path_cqe.
1322 vlan_tag));
1323 else
1324#endif
1325 netif_receive_skb(skb);
1326 } else {
1327 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1328 " - dropping packet!\n");
1329 dev_kfree_skb(skb);
1330 }
1331
7a9b2557
VZ
1332
1333 /* put new skb in bin */
1334 fp->tpa_pool[queue].skb = new_skb;
1335
1336 } else {
66e855f3 1337 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1338 DP(NETIF_MSG_RX_STATUS,
1339 "Failed to allocate new skb - dropping packet!\n");
66e855f3 1340 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1341 }
1342
1343 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1344}
1345
1346static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1347 struct bnx2x_fastpath *fp,
1348 u16 bd_prod, u16 rx_comp_prod,
1349 u16 rx_sge_prod)
1350{
1351 struct tstorm_eth_rx_producers rx_prods = {0};
1352 int i;
1353
1354 /* Update producers */
1355 rx_prods.bd_prod = bd_prod;
1356 rx_prods.cqe_prod = rx_comp_prod;
1357 rx_prods.sge_prod = rx_sge_prod;
1358
58f4c4cf
EG
1359 /*
1360 * Make sure that the BD and SGE data is updated before updating the
1361 * producers since FW might read the BD/SGE right after the producer
1362 * is updated.
1363 * This is only applicable for weak-ordered memory model archs such
1364 * as IA-64. The following barrier is also mandatory since FW will
1365 * assumes BDs must have buffers.
1366 */
1367 wmb();
1368
7a9b2557
VZ
1369 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1370 REG_WR(bp, BAR_TSTRORM_INTMEM +
1371 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1372 ((u32 *)&rx_prods)[i]);
1373
58f4c4cf
EG
1374 mmiowb(); /* keep prod updates ordered */
1375
7a9b2557
VZ
1376 DP(NETIF_MSG_RX_STATUS,
1377 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1378 bd_prod, rx_comp_prod, rx_sge_prod);
1379}
1380
a2fbb9ea
ET
1381static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1382{
1383 struct bnx2x *bp = fp->bp;
34f80b04 1384 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1385 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1386 int rx_pkt = 0;
1387
1388#ifdef BNX2X_STOP_ON_ERROR
1389 if (unlikely(bp->panic))
1390 return 0;
1391#endif
1392
34f80b04
EG
1393 /* CQ "next element" is of the size of the regular element,
1394 that's why it's ok here */
a2fbb9ea
ET
1395 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1396 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1397 hw_comp_cons++;
1398
1399 bd_cons = fp->rx_bd_cons;
1400 bd_prod = fp->rx_bd_prod;
34f80b04 1401 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1402 sw_comp_cons = fp->rx_comp_cons;
1403 sw_comp_prod = fp->rx_comp_prod;
1404
1405 /* Memory barrier necessary as speculative reads of the rx
1406 * buffer can be ahead of the index in the status block
1407 */
1408 rmb();
1409
1410 DP(NETIF_MSG_RX_STATUS,
1411 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1412 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1413
1414 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1415 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1416 struct sk_buff *skb;
1417 union eth_rx_cqe *cqe;
34f80b04
EG
1418 u8 cqe_fp_flags;
1419 u16 len, pad;
a2fbb9ea
ET
1420
1421 comp_ring_cons = RCQ_BD(sw_comp_cons);
1422 bd_prod = RX_BD(bd_prod);
1423 bd_cons = RX_BD(bd_cons);
1424
1425 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1426 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1427
a2fbb9ea 1428 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1429 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1430 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
a2fbb9ea 1431 cqe->fast_path_cqe.rss_hash_result,
34f80b04
EG
1432 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1433 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1434
1435 /* is this a slowpath msg? */
34f80b04 1436 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1437 bnx2x_sp_event(fp, cqe);
1438 goto next_cqe;
1439
1440 /* this is an rx packet */
1441 } else {
1442 rx_buf = &fp->rx_buf_ring[bd_cons];
1443 skb = rx_buf->skb;
a2fbb9ea
ET
1444 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1445 pad = cqe->fast_path_cqe.placement_offset;
1446
7a9b2557
VZ
1447 /* If CQE is marked both TPA_START and TPA_END
1448 it is a non-TPA CQE */
1449 if ((!fp->disable_tpa) &&
1450 (TPA_TYPE(cqe_fp_flags) !=
1451 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1452 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1453
1454 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1455 DP(NETIF_MSG_RX_STATUS,
1456 "calling tpa_start on queue %d\n",
1457 queue);
1458
1459 bnx2x_tpa_start(fp, queue, skb,
1460 bd_cons, bd_prod);
1461 goto next_rx;
1462 }
1463
1464 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1465 DP(NETIF_MSG_RX_STATUS,
1466 "calling tpa_stop on queue %d\n",
1467 queue);
1468
1469 if (!BNX2X_RX_SUM_FIX(cqe))
1470 BNX2X_ERR("STOP on none TCP "
1471 "data\n");
1472
1473 /* This is a size of the linear data
1474 on this skb */
1475 len = le16_to_cpu(cqe->fast_path_cqe.
1476 len_on_bd);
1477 bnx2x_tpa_stop(bp, fp, queue, pad,
1478 len, cqe, comp_ring_cons);
1479#ifdef BNX2X_STOP_ON_ERROR
1480 if (bp->panic)
1481 return -EINVAL;
1482#endif
1483
1484 bnx2x_update_sge_prod(fp,
1485 &cqe->fast_path_cqe);
1486 goto next_cqe;
1487 }
1488 }
1489
a2fbb9ea
ET
1490 pci_dma_sync_single_for_device(bp->pdev,
1491 pci_unmap_addr(rx_buf, mapping),
1492 pad + RX_COPY_THRESH,
1493 PCI_DMA_FROMDEVICE);
1494 prefetch(skb);
1495 prefetch(((char *)(skb)) + 128);
1496
1497 /* is this an error packet? */
34f80b04 1498 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1499 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1500 "ERROR flags %x rx packet %u\n",
1501 cqe_fp_flags, sw_comp_cons);
66e855f3 1502 bp->eth_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1503 goto reuse_rx;
1504 }
1505
1506 /* Since we don't have a jumbo ring
1507 * copy small packets if mtu > 1500
1508 */
1509 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1510 (len <= RX_COPY_THRESH)) {
1511 struct sk_buff *new_skb;
1512
1513 new_skb = netdev_alloc_skb(bp->dev,
1514 len + pad);
1515 if (new_skb == NULL) {
1516 DP(NETIF_MSG_RX_ERR,
34f80b04 1517 "ERROR packet dropped "
a2fbb9ea 1518 "because of alloc failure\n");
66e855f3 1519 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1520 goto reuse_rx;
1521 }
1522
1523 /* aligned copy */
1524 skb_copy_from_linear_data_offset(skb, pad,
1525 new_skb->data + pad, len);
1526 skb_reserve(new_skb, pad);
1527 skb_put(new_skb, len);
1528
1529 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1530
1531 skb = new_skb;
1532
1533 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1534 pci_unmap_single(bp->pdev,
1535 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1536 bp->rx_buf_size,
a2fbb9ea
ET
1537 PCI_DMA_FROMDEVICE);
1538 skb_reserve(skb, pad);
1539 skb_put(skb, len);
1540
1541 } else {
1542 DP(NETIF_MSG_RX_ERR,
34f80b04 1543 "ERROR packet dropped because "
a2fbb9ea 1544 "of alloc failure\n");
66e855f3 1545 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1546reuse_rx:
1547 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1548 goto next_rx;
1549 }
1550
1551 skb->protocol = eth_type_trans(skb, bp->dev);
1552
1553 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1554 if (bp->rx_csum) {
1adcd8be
EG
1555 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1556 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3
YG
1557 else
1558 bp->eth_stats.hw_csum_err++;
1559 }
a2fbb9ea
ET
1560 }
1561
1562#ifdef BCM_VLAN
34f80b04
EG
1563 if ((bp->vlgrp != NULL) &&
1564 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1565 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1566 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1567 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1568 else
1569#endif
34f80b04 1570 netif_receive_skb(skb);
a2fbb9ea 1571
a2fbb9ea
ET
1572
1573next_rx:
1574 rx_buf->skb = NULL;
1575
1576 bd_cons = NEXT_RX_IDX(bd_cons);
1577 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1578 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1579 rx_pkt++;
a2fbb9ea
ET
1580next_cqe:
1581 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1582 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1583
34f80b04 1584 if (rx_pkt == budget)
a2fbb9ea
ET
1585 break;
1586 } /* while */
1587
1588 fp->rx_bd_cons = bd_cons;
34f80b04 1589 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1590 fp->rx_comp_cons = sw_comp_cons;
1591 fp->rx_comp_prod = sw_comp_prod;
1592
7a9b2557
VZ
1593 /* Update producers */
1594 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1595 fp->rx_sge_prod);
a2fbb9ea
ET
1596
1597 fp->rx_pkt += rx_pkt;
1598 fp->rx_calls++;
1599
1600 return rx_pkt;
1601}
1602
1603static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1604{
1605 struct bnx2x_fastpath *fp = fp_cookie;
1606 struct bnx2x *bp = fp->bp;
34f80b04 1607 int index = FP_IDX(fp);
a2fbb9ea 1608
da5a662a
VZ
1609 /* Return here if interrupt is disabled */
1610 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1611 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1612 return IRQ_HANDLED;
1613 }
1614
34f80b04
EG
1615 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1616 index, FP_SB_ID(fp));
1617 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1618
1619#ifdef BNX2X_STOP_ON_ERROR
1620 if (unlikely(bp->panic))
1621 return IRQ_HANDLED;
1622#endif
1623
1624 prefetch(fp->rx_cons_sb);
1625 prefetch(fp->tx_cons_sb);
1626 prefetch(&fp->status_blk->c_status_block.status_block_index);
1627 prefetch(&fp->status_blk->u_status_block.status_block_index);
1628
908a7a16 1629 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1630
a2fbb9ea
ET
1631 return IRQ_HANDLED;
1632}
1633
1634static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1635{
1636 struct net_device *dev = dev_instance;
1637 struct bnx2x *bp = netdev_priv(dev);
1638 u16 status = bnx2x_ack_int(bp);
34f80b04 1639 u16 mask;
a2fbb9ea 1640
34f80b04 1641 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1642 if (unlikely(status == 0)) {
1643 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1644 return IRQ_NONE;
1645 }
34f80b04 1646 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1647
34f80b04 1648 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1649 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1651 return IRQ_HANDLED;
1652 }
1653
3196a88a
EG
1654#ifdef BNX2X_STOP_ON_ERROR
1655 if (unlikely(bp->panic))
1656 return IRQ_HANDLED;
1657#endif
1658
34f80b04
EG
1659 mask = 0x2 << bp->fp[0].sb_id;
1660 if (status & mask) {
a2fbb9ea
ET
1661 struct bnx2x_fastpath *fp = &bp->fp[0];
1662
1663 prefetch(fp->rx_cons_sb);
1664 prefetch(fp->tx_cons_sb);
1665 prefetch(&fp->status_blk->c_status_block.status_block_index);
1666 prefetch(&fp->status_blk->u_status_block.status_block_index);
1667
908a7a16 1668 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1669
34f80b04 1670 status &= ~mask;
a2fbb9ea
ET
1671 }
1672
a2fbb9ea 1673
34f80b04 1674 if (unlikely(status & 0x1)) {
1cf167f2 1675 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1676
1677 status &= ~0x1;
1678 if (!status)
1679 return IRQ_HANDLED;
1680 }
1681
34f80b04
EG
1682 if (status)
1683 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1684 status);
a2fbb9ea 1685
c18487ee 1686 return IRQ_HANDLED;
a2fbb9ea
ET
1687}
1688
c18487ee 1689/* end of fast path */
a2fbb9ea 1690
bb2a0f7a 1691static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1692
c18487ee
YR
1693/* Link */
1694
1695/*
1696 * General service functions
1697 */
a2fbb9ea 1698
4a37fb66 1699static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1700{
1701 u32 lock_status;
1702 u32 resource_bit = (1 << resource);
4a37fb66
YG
1703 int func = BP_FUNC(bp);
1704 u32 hw_lock_control_reg;
c18487ee 1705 int cnt;
a2fbb9ea 1706
c18487ee
YR
1707 /* Validating that the resource is within range */
1708 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1709 DP(NETIF_MSG_HW,
1710 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1711 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1712 return -EINVAL;
1713 }
a2fbb9ea 1714
4a37fb66
YG
1715 if (func <= 5) {
1716 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1717 } else {
1718 hw_lock_control_reg =
1719 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1720 }
1721
c18487ee 1722 /* Validating that the resource is not already taken */
4a37fb66 1723 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1724 if (lock_status & resource_bit) {
1725 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1726 lock_status, resource_bit);
1727 return -EEXIST;
1728 }
a2fbb9ea 1729
46230476
EG
1730 /* Try for 5 second every 5ms */
1731 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1732 /* Try to acquire the lock */
4a37fb66
YG
1733 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1734 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1735 if (lock_status & resource_bit)
1736 return 0;
a2fbb9ea 1737
c18487ee 1738 msleep(5);
a2fbb9ea 1739 }
c18487ee
YR
1740 DP(NETIF_MSG_HW, "Timeout\n");
1741 return -EAGAIN;
1742}
a2fbb9ea 1743
4a37fb66 1744static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1745{
1746 u32 lock_status;
1747 u32 resource_bit = (1 << resource);
4a37fb66
YG
1748 int func = BP_FUNC(bp);
1749 u32 hw_lock_control_reg;
a2fbb9ea 1750
c18487ee
YR
1751 /* Validating that the resource is within range */
1752 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1753 DP(NETIF_MSG_HW,
1754 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1755 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1756 return -EINVAL;
1757 }
1758
4a37fb66
YG
1759 if (func <= 5) {
1760 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1761 } else {
1762 hw_lock_control_reg =
1763 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1764 }
1765
c18487ee 1766 /* Validating that the resource is currently taken */
4a37fb66 1767 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1768 if (!(lock_status & resource_bit)) {
1769 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1770 lock_status, resource_bit);
1771 return -EFAULT;
a2fbb9ea
ET
1772 }
1773
4a37fb66 1774 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1775 return 0;
1776}
1777
1778/* HW Lock for shared dual port PHYs */
4a37fb66 1779static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1780{
1781 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1782
34f80b04 1783 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1784
c18487ee
YR
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1787 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1788}
a2fbb9ea 1789
4a37fb66 1790static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1791{
1792 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1793
c18487ee
YR
1794 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1795 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1796 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1797
34f80b04 1798 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1799}
a2fbb9ea 1800
17de50b7 1801int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1802{
1803 /* The GPIO should be swapped if swap register is set and active */
1804 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1805 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1806 int gpio_shift = gpio_num +
1807 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1808 u32 gpio_mask = (1 << gpio_shift);
1809 u32 gpio_reg;
a2fbb9ea 1810
c18487ee
YR
1811 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1812 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1813 return -EINVAL;
1814 }
a2fbb9ea 1815
4a37fb66 1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1817 /* read GPIO and mask except the float bits */
1818 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1819
c18487ee
YR
1820 switch (mode) {
1821 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1822 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1823 gpio_num, gpio_shift);
1824 /* clear FLOAT and set CLR */
1825 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1826 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1827 break;
a2fbb9ea 1828
c18487ee
YR
1829 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1830 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1831 gpio_num, gpio_shift);
1832 /* clear FLOAT and set SET */
1833 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1834 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1835 break;
a2fbb9ea 1836
17de50b7 1837 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1838 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1839 gpio_num, gpio_shift);
1840 /* set FLOAT */
1841 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1842 break;
a2fbb9ea 1843
c18487ee
YR
1844 default:
1845 break;
a2fbb9ea
ET
1846 }
1847
c18487ee 1848 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1849 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1850
c18487ee 1851 return 0;
a2fbb9ea
ET
1852}
1853
c18487ee 1854static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1855{
c18487ee
YR
1856 u32 spio_mask = (1 << spio_num);
1857 u32 spio_reg;
a2fbb9ea 1858
c18487ee
YR
1859 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1860 (spio_num > MISC_REGISTERS_SPIO_7)) {
1861 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1862 return -EINVAL;
a2fbb9ea
ET
1863 }
1864
4a37fb66 1865 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1866 /* read SPIO and mask except the float bits */
1867 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1868
c18487ee 1869 switch (mode) {
6378c025 1870 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1871 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1872 /* clear FLOAT and set CLR */
1873 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1874 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1875 break;
a2fbb9ea 1876
6378c025 1877 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1878 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1879 /* clear FLOAT and set SET */
1880 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1881 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1882 break;
a2fbb9ea 1883
c18487ee
YR
1884 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1885 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1886 /* set FLOAT */
1887 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1888 break;
a2fbb9ea 1889
c18487ee
YR
1890 default:
1891 break;
a2fbb9ea
ET
1892 }
1893
c18487ee 1894 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1895 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1896
a2fbb9ea
ET
1897 return 0;
1898}
1899
c18487ee 1900static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1901{
ad33ea3a
EG
1902 switch (bp->link_vars.ieee_fc &
1903 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1904 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1906 ADVERTISED_Pause);
1907 break;
1908 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1909 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1910 ADVERTISED_Pause);
1911 break;
1912 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1913 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1914 break;
1915 default:
34f80b04 1916 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1917 ADVERTISED_Pause);
1918 break;
1919 }
1920}
f1410647 1921
c18487ee
YR
1922static void bnx2x_link_report(struct bnx2x *bp)
1923{
1924 if (bp->link_vars.link_up) {
1925 if (bp->state == BNX2X_STATE_OPEN)
1926 netif_carrier_on(bp->dev);
1927 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1928
c18487ee 1929 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1930
c18487ee
YR
1931 if (bp->link_vars.duplex == DUPLEX_FULL)
1932 printk("full duplex");
1933 else
1934 printk("half duplex");
f1410647 1935
c0700f90
DM
1936 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1937 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1938 printk(", receive ");
c0700f90 1939 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1940 printk("& transmit ");
1941 } else {
1942 printk(", transmit ");
1943 }
1944 printk("flow control ON");
1945 }
1946 printk("\n");
f1410647 1947
c18487ee
YR
1948 } else { /* link_down */
1949 netif_carrier_off(bp->dev);
1950 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1951 }
c18487ee
YR
1952}
1953
1954static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1955{
19680c48
EG
1956 if (!BP_NOMCP(bp)) {
1957 u8 rc;
a2fbb9ea 1958
19680c48 1959 /* Initialize link parameters structure variables */
8c99e7b0
YR
1960 /* It is recommended to turn off RX FC for jumbo frames
1961 for better performance */
1962 if (IS_E1HMF(bp))
c0700f90 1963 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1964 else if (bp->dev->mtu > 5000)
c0700f90 1965 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1966 else
c0700f90 1967 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1968
4a37fb66 1969 bnx2x_acquire_phy_lock(bp);
19680c48 1970 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1971 bnx2x_release_phy_lock(bp);
a2fbb9ea 1972
3c96c68b
EG
1973 bnx2x_calc_fc_adv(bp);
1974
19680c48
EG
1975 if (bp->link_vars.link_up)
1976 bnx2x_link_report(bp);
a2fbb9ea 1977
34f80b04 1978
19680c48
EG
1979 return rc;
1980 }
1981 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1982 return -EINVAL;
a2fbb9ea
ET
1983}
1984
c18487ee 1985static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1986{
19680c48 1987 if (!BP_NOMCP(bp)) {
4a37fb66 1988 bnx2x_acquire_phy_lock(bp);
19680c48 1989 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1990 bnx2x_release_phy_lock(bp);
a2fbb9ea 1991
19680c48
EG
1992 bnx2x_calc_fc_adv(bp);
1993 } else
1994 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 1995}
a2fbb9ea 1996
c18487ee
YR
1997static void bnx2x__link_reset(struct bnx2x *bp)
1998{
19680c48 1999 if (!BP_NOMCP(bp)) {
4a37fb66 2000 bnx2x_acquire_phy_lock(bp);
19680c48 2001 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2002 bnx2x_release_phy_lock(bp);
19680c48
EG
2003 } else
2004 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 2005}
a2fbb9ea 2006
c18487ee
YR
2007static u8 bnx2x_link_test(struct bnx2x *bp)
2008{
2009 u8 rc;
a2fbb9ea 2010
4a37fb66 2011 bnx2x_acquire_phy_lock(bp);
c18487ee 2012 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2013 bnx2x_release_phy_lock(bp);
a2fbb9ea 2014
c18487ee
YR
2015 return rc;
2016}
a2fbb9ea 2017
34f80b04
EG
2018/* Calculates the sum of vn_min_rates.
2019 It's needed for further normalizing of the min_rates.
2020
2021 Returns:
2022 sum of vn_min_rates
2023 or
2024 0 - if all the min_rates are 0.
33471629 2025 In the later case fairness algorithm should be deactivated.
34f80b04
EG
2026 If not all min_rates are zero then those that are zeroes will
2027 be set to 1.
2028 */
2029static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2030{
2031 int i, port = BP_PORT(bp);
2032 u32 wsum = 0;
2033 int all_zero = 1;
2034
2035 for (i = 0; i < E1HVN_MAX; i++) {
2036 u32 vn_cfg =
2037 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2038 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2039 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2040 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2041 /* If min rate is zero - set it to 1 */
2042 if (!vn_min_rate)
2043 vn_min_rate = DEF_MIN_RATE;
2044 else
2045 all_zero = 0;
2046
2047 wsum += vn_min_rate;
2048 }
2049 }
2050
2051 /* ... only if all min rates are zeros - disable FAIRNESS */
2052 if (all_zero)
2053 return 0;
2054
2055 return wsum;
2056}
2057
2058static void bnx2x_init_port_minmax(struct bnx2x *bp,
2059 int en_fness,
2060 u16 port_rate,
2061 struct cmng_struct_per_port *m_cmng_port)
2062{
2063 u32 r_param = port_rate / 8;
2064 int port = BP_PORT(bp);
2065 int i;
2066
2067 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2068
2069 /* Enable minmax only if we are in e1hmf mode */
2070 if (IS_E1HMF(bp)) {
2071 u32 fair_periodic_timeout_usec;
2072 u32 t_fair;
2073
2074 /* Enable rate shaping and fairness */
2075 m_cmng_port->flags.cmng_vn_enable = 1;
2076 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2077 m_cmng_port->flags.rate_shaping_enable = 1;
2078
2079 if (!en_fness)
2080 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2081 " fairness will be disabled\n");
2082
2083 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2084 m_cmng_port->rs_vars.rs_periodic_timeout =
2085 RS_PERIODIC_TIMEOUT_USEC / 4;
2086
2087 /* this is the threshold below which no timer arming will occur
2088 1.25 coefficient is for the threshold to be a little bigger
2089 than the real time, to compensate for timer in-accuracy */
2090 m_cmng_port->rs_vars.rs_threshold =
2091 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2092
2093 /* resolution of fairness timer */
2094 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2095 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2096 t_fair = T_FAIR_COEF / port_rate;
2097
2098 /* this is the threshold below which we won't arm
2099 the timer anymore */
2100 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2101
2102 /* we multiply by 1e3/8 to get bytes/msec.
2103 We don't want the credits to pass a credit
2104 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2105 m_cmng_port->fair_vars.upper_bound =
2106 r_param * t_fair * FAIR_MEM;
2107 /* since each tick is 4 usec */
2108 m_cmng_port->fair_vars.fairness_timeout =
2109 fair_periodic_timeout_usec / 4;
2110
2111 } else {
2112 /* Disable rate shaping and fairness */
2113 m_cmng_port->flags.cmng_vn_enable = 0;
2114 m_cmng_port->flags.fairness_enable = 0;
2115 m_cmng_port->flags.rate_shaping_enable = 0;
2116
2117 DP(NETIF_MSG_IFUP,
2118 "Single function mode minmax will be disabled\n");
2119 }
2120
2121 /* Store it to internal memory */
2122 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2123 REG_WR(bp, BAR_XSTRORM_INTMEM +
2124 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2125 ((u32 *)(m_cmng_port))[i]);
2126}
2127
2128static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2129 u32 wsum, u16 port_rate,
2130 struct cmng_struct_per_port *m_cmng_port)
2131{
2132 struct rate_shaping_vars_per_vn m_rs_vn;
2133 struct fairness_vars_per_vn m_fair_vn;
2134 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2135 u16 vn_min_rate, vn_max_rate;
2136 int i;
2137
2138 /* If function is hidden - set min and max to zeroes */
2139 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2140 vn_min_rate = 0;
2141 vn_max_rate = 0;
2142
2143 } else {
2144 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2145 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2146 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2147 if current min rate is zero - set it to 1.
33471629 2148 This is a requirement of the algorithm. */
34f80b04
EG
2149 if ((vn_min_rate == 0) && wsum)
2150 vn_min_rate = DEF_MIN_RATE;
2151 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2152 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2153 }
2154
2155 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2156 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2157
2158 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2159 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2160
2161 /* global vn counter - maximal Mbps for this vn */
2162 m_rs_vn.vn_counter.rate = vn_max_rate;
2163
2164 /* quota - number of bytes transmitted in this period */
2165 m_rs_vn.vn_counter.quota =
2166 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2167
2168#ifdef BNX2X_PER_PROT_QOS
2169 /* per protocol counter */
2170 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2171 /* maximal Mbps for this protocol */
2172 m_rs_vn.protocol_counters[protocol].rate =
2173 protocol_max_rate[protocol];
2174 /* the quota in each timer period -
2175 number of bytes transmitted in this period */
2176 m_rs_vn.protocol_counters[protocol].quota =
2177 (u32)(rs_periodic_timeout_usec *
2178 ((double)m_rs_vn.
2179 protocol_counters[protocol].rate/8));
2180 }
2181#endif
2182
2183 if (wsum) {
2184 /* credit for each period of the fairness algorithm:
2185 number of bytes in T_FAIR (the vn share the port rate).
2186 wsum should not be larger than 10000, thus
2187 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2188 m_fair_vn.vn_credit_delta =
2189 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2190 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2191 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2192 m_fair_vn.vn_credit_delta);
2193 }
2194
2195#ifdef BNX2X_PER_PROT_QOS
2196 do {
2197 u32 protocolWeightSum = 0;
2198
2199 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2200 protocolWeightSum +=
2201 drvInit.protocol_min_rate[protocol];
2202 /* per protocol counter -
2203 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2204 if (protocolWeightSum > 0) {
2205 for (protocol = 0;
2206 protocol < NUM_OF_PROTOCOLS; protocol++)
2207 /* credit for each period of the
2208 fairness algorithm - number of bytes in
2209 T_FAIR (the protocol share the vn rate) */
2210 m_fair_vn.protocol_credit_delta[protocol] =
2211 (u32)((vn_min_rate / 8) * t_fair *
2212 protocol_min_rate / protocolWeightSum);
2213 }
2214 } while (0);
2215#endif
2216
2217 /* Store it to internal memory */
2218 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2219 REG_WR(bp, BAR_XSTRORM_INTMEM +
2220 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2221 ((u32 *)(&m_rs_vn))[i]);
2222
2223 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2224 REG_WR(bp, BAR_XSTRORM_INTMEM +
2225 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2226 ((u32 *)(&m_fair_vn))[i]);
2227}
2228
c18487ee
YR
2229/* This function is called upon link interrupt */
2230static void bnx2x_link_attn(struct bnx2x *bp)
2231{
34f80b04
EG
2232 int vn;
2233
bb2a0f7a
YG
2234 /* Make sure that we are synced with the current statistics */
2235 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2236
4a37fb66 2237 bnx2x_acquire_phy_lock(bp);
c18487ee 2238 bnx2x_link_update(&bp->link_params, &bp->link_vars);
4a37fb66 2239 bnx2x_release_phy_lock(bp);
a2fbb9ea 2240
bb2a0f7a
YG
2241 if (bp->link_vars.link_up) {
2242
2243 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2244 struct host_port_stats *pstats;
2245
2246 pstats = bnx2x_sp(bp, port_stats);
2247 /* reset old bmac stats */
2248 memset(&(pstats->mac_stx[0]), 0,
2249 sizeof(struct mac_stx));
2250 }
2251 if ((bp->state == BNX2X_STATE_OPEN) ||
2252 (bp->state == BNX2X_STATE_DISABLED))
2253 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2254 }
2255
c18487ee
YR
2256 /* indicate link status */
2257 bnx2x_link_report(bp);
34f80b04
EG
2258
2259 if (IS_E1HMF(bp)) {
2260 int func;
2261
2262 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2263 if (vn == BP_E1HVN(bp))
2264 continue;
2265
2266 func = ((vn << 1) | BP_PORT(bp));
2267
2268 /* Set the attention towards other drivers
2269 on the same port */
2270 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2271 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2272 }
2273 }
2274
2275 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2276 struct cmng_struct_per_port m_cmng_port;
2277 u32 wsum;
2278 int port = BP_PORT(bp);
2279
2280 /* Init RATE SHAPING and FAIRNESS contexts */
2281 wsum = bnx2x_calc_vn_wsum(bp);
2282 bnx2x_init_port_minmax(bp, (int)wsum,
2283 bp->link_vars.line_speed,
2284 &m_cmng_port);
2285 if (IS_E1HMF(bp))
2286 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2287 bnx2x_init_vn_minmax(bp, 2*vn + port,
2288 wsum, bp->link_vars.line_speed,
2289 &m_cmng_port);
2290 }
c18487ee 2291}
a2fbb9ea 2292
c18487ee
YR
2293static void bnx2x__link_status_update(struct bnx2x *bp)
2294{
2295 if (bp->state != BNX2X_STATE_OPEN)
2296 return;
a2fbb9ea 2297
c18487ee 2298 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2299
bb2a0f7a
YG
2300 if (bp->link_vars.link_up)
2301 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2302 else
2303 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2304
c18487ee
YR
2305 /* indicate link status */
2306 bnx2x_link_report(bp);
a2fbb9ea 2307}
a2fbb9ea 2308
34f80b04
EG
2309static void bnx2x_pmf_update(struct bnx2x *bp)
2310{
2311 int port = BP_PORT(bp);
2312 u32 val;
2313
2314 bp->port.pmf = 1;
2315 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2316
2317 /* enable nig attention */
2318 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2319 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2320 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2321
2322 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2323}
2324
c18487ee 2325/* end of Link */
a2fbb9ea
ET
2326
2327/* slow path */
2328
2329/*
2330 * General service functions
2331 */
2332
2333/* the slow path queue is odd since completions arrive on the fastpath ring */
2334static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2335 u32 data_hi, u32 data_lo, int common)
2336{
34f80b04 2337 int func = BP_FUNC(bp);
a2fbb9ea 2338
34f80b04
EG
2339 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2340 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2341 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2342 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2343 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2344
2345#ifdef BNX2X_STOP_ON_ERROR
2346 if (unlikely(bp->panic))
2347 return -EIO;
2348#endif
2349
34f80b04 2350 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2351
2352 if (!bp->spq_left) {
2353 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2354 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2355 bnx2x_panic();
2356 return -EBUSY;
2357 }
f1410647 2358
a2fbb9ea
ET
2359 /* CID needs port number to be encoded int it */
2360 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2361 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2362 HW_CID(bp, cid)));
2363 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2364 if (common)
2365 bp->spq_prod_bd->hdr.type |=
2366 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2367
2368 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2369 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2370
2371 bp->spq_left--;
2372
2373 if (bp->spq_prod_bd == bp->spq_last_bd) {
2374 bp->spq_prod_bd = bp->spq;
2375 bp->spq_prod_idx = 0;
2376 DP(NETIF_MSG_TIMER, "end of spq\n");
2377
2378 } else {
2379 bp->spq_prod_bd++;
2380 bp->spq_prod_idx++;
2381 }
2382
34f80b04 2383 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2384 bp->spq_prod_idx);
2385
34f80b04 2386 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2387 return 0;
2388}
2389
2390/* acquire split MCP access lock register */
4a37fb66 2391static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2392{
a2fbb9ea 2393 u32 i, j, val;
34f80b04 2394 int rc = 0;
a2fbb9ea
ET
2395
2396 might_sleep();
2397 i = 100;
2398 for (j = 0; j < i*10; j++) {
2399 val = (1UL << 31);
2400 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2401 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2402 if (val & (1L << 31))
2403 break;
2404
2405 msleep(5);
2406 }
a2fbb9ea 2407 if (!(val & (1L << 31))) {
19680c48 2408 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2409 rc = -EBUSY;
2410 }
2411
2412 return rc;
2413}
2414
4a37fb66
YG
2415/* release split MCP access lock register */
2416static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2417{
2418 u32 val = 0;
2419
2420 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2421}
2422
2423static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2424{
2425 struct host_def_status_block *def_sb = bp->def_status_blk;
2426 u16 rc = 0;
2427
2428 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2429 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2430 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2431 rc |= 1;
2432 }
2433 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2434 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2435 rc |= 2;
2436 }
2437 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2438 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2439 rc |= 4;
2440 }
2441 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2442 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2443 rc |= 8;
2444 }
2445 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2446 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2447 rc |= 16;
2448 }
2449 return rc;
2450}
2451
2452/*
2453 * slow path service functions
2454 */
2455
2456static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2457{
34f80b04 2458 int port = BP_PORT(bp);
5c862848
EG
2459 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2460 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2461 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2462 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2463 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2464 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2465 u32 aeu_mask;
a2fbb9ea 2466
a2fbb9ea
ET
2467 if (bp->attn_state & asserted)
2468 BNX2X_ERR("IGU ERROR\n");
2469
3fcaf2e5
EG
2470 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2471 aeu_mask = REG_RD(bp, aeu_addr);
2472
a2fbb9ea 2473 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2474 aeu_mask, asserted);
2475 aeu_mask &= ~(asserted & 0xff);
2476 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2477
3fcaf2e5
EG
2478 REG_WR(bp, aeu_addr, aeu_mask);
2479 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2480
3fcaf2e5 2481 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2482 bp->attn_state |= asserted;
3fcaf2e5 2483 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2484
2485 if (asserted & ATTN_HARD_WIRED_MASK) {
2486 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2487
877e9aa4
ET
2488 /* save nig interrupt mask */
2489 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2490 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2491
c18487ee 2492 bnx2x_link_attn(bp);
a2fbb9ea
ET
2493
2494 /* handle unicore attn? */
2495 }
2496 if (asserted & ATTN_SW_TIMER_4_FUNC)
2497 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2498
2499 if (asserted & GPIO_2_FUNC)
2500 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2501
2502 if (asserted & GPIO_3_FUNC)
2503 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2504
2505 if (asserted & GPIO_4_FUNC)
2506 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2507
2508 if (port == 0) {
2509 if (asserted & ATTN_GENERAL_ATTN_1) {
2510 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2511 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2512 }
2513 if (asserted & ATTN_GENERAL_ATTN_2) {
2514 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2515 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2516 }
2517 if (asserted & ATTN_GENERAL_ATTN_3) {
2518 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2519 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2520 }
2521 } else {
2522 if (asserted & ATTN_GENERAL_ATTN_4) {
2523 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2524 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2525 }
2526 if (asserted & ATTN_GENERAL_ATTN_5) {
2527 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2529 }
2530 if (asserted & ATTN_GENERAL_ATTN_6) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2533 }
2534 }
2535
2536 } /* if hardwired */
2537
5c862848
EG
2538 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2539 asserted, hc_addr);
2540 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2541
2542 /* now set back the mask */
2543 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2544 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2545}
2546
877e9aa4 2547static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2548{
34f80b04 2549 int port = BP_PORT(bp);
877e9aa4
ET
2550 int reg_offset;
2551 u32 val;
2552
34f80b04
EG
2553 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2554 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2555
34f80b04 2556 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2557
2558 val = REG_RD(bp, reg_offset);
2559 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2560 REG_WR(bp, reg_offset, val);
2561
2562 BNX2X_ERR("SPIO5 hw attention\n");
2563
34f80b04 2564 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2565 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2566 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2567 /* Fan failure attention */
2568
17de50b7 2569 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2570 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2571 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2572 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2573 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2574 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2575 /* mark the failure */
c18487ee 2576 bp->link_params.ext_phy_config &=
877e9aa4 2577 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2578 bp->link_params.ext_phy_config |=
877e9aa4
ET
2579 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2580 SHMEM_WR(bp,
2581 dev_info.port_hw_config[port].
2582 external_phy_config,
c18487ee 2583 bp->link_params.ext_phy_config);
877e9aa4
ET
2584 /* log the failure */
2585 printk(KERN_ERR PFX "Fan Failure on Network"
2586 " Controller %s has caused the driver to"
2587 " shutdown the card to prevent permanent"
2588 " damage. Please contact Dell Support for"
2589 " assistance\n", bp->dev->name);
2590 break;
2591
2592 default:
2593 break;
2594 }
2595 }
34f80b04
EG
2596
2597 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2598
2599 val = REG_RD(bp, reg_offset);
2600 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2601 REG_WR(bp, reg_offset, val);
2602
2603 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2604 (attn & HW_INTERRUT_ASSERT_SET_0));
2605 bnx2x_panic();
2606 }
877e9aa4
ET
2607}
2608
2609static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2610{
2611 u32 val;
2612
2613 if (attn & BNX2X_DOORQ_ASSERT) {
2614
2615 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2616 BNX2X_ERR("DB hw attention 0x%x\n", val);
2617 /* DORQ discard attention */
2618 if (val & 0x2)
2619 BNX2X_ERR("FATAL error from DORQ\n");
2620 }
34f80b04
EG
2621
2622 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2623
2624 int port = BP_PORT(bp);
2625 int reg_offset;
2626
2627 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2628 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2629
2630 val = REG_RD(bp, reg_offset);
2631 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2632 REG_WR(bp, reg_offset, val);
2633
2634 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2635 (attn & HW_INTERRUT_ASSERT_SET_1));
2636 bnx2x_panic();
2637 }
877e9aa4
ET
2638}
2639
2640static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2641{
2642 u32 val;
2643
2644 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2645
2646 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2647 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2648 /* CFC error attention */
2649 if (val & 0x2)
2650 BNX2X_ERR("FATAL error from CFC\n");
2651 }
2652
2653 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2654
2655 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2656 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2657 /* RQ_USDMDP_FIFO_OVERFLOW */
2658 if (val & 0x18000)
2659 BNX2X_ERR("FATAL error from PXP\n");
2660 }
34f80b04
EG
2661
2662 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2663
2664 int port = BP_PORT(bp);
2665 int reg_offset;
2666
2667 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2668 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2669
2670 val = REG_RD(bp, reg_offset);
2671 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2672 REG_WR(bp, reg_offset, val);
2673
2674 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2675 (attn & HW_INTERRUT_ASSERT_SET_2));
2676 bnx2x_panic();
2677 }
877e9aa4
ET
2678}
2679
2680static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2681{
34f80b04
EG
2682 u32 val;
2683
877e9aa4
ET
2684 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2685
34f80b04
EG
2686 if (attn & BNX2X_PMF_LINK_ASSERT) {
2687 int func = BP_FUNC(bp);
2688
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2690 bnx2x__link_status_update(bp);
2691 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2692 DRV_STATUS_PMF)
2693 bnx2x_pmf_update(bp);
2694
2695 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2696
2697 BNX2X_ERR("MC assert!\n");
2698 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2699 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2700 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2702 bnx2x_panic();
2703
2704 } else if (attn & BNX2X_MCP_ASSERT) {
2705
2706 BNX2X_ERR("MCP assert!\n");
2707 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2708 bnx2x_fw_dump(bp);
877e9aa4
ET
2709
2710 } else
2711 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2712 }
2713
2714 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2715 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2716 if (attn & BNX2X_GRC_TIMEOUT) {
2717 val = CHIP_IS_E1H(bp) ?
2718 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2719 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2720 }
2721 if (attn & BNX2X_GRC_RSV) {
2722 val = CHIP_IS_E1H(bp) ?
2723 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2724 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2725 }
877e9aa4 2726 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2727 }
2728}
2729
2730static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2731{
a2fbb9ea
ET
2732 struct attn_route attn;
2733 struct attn_route group_mask;
34f80b04 2734 int port = BP_PORT(bp);
877e9aa4 2735 int index;
a2fbb9ea
ET
2736 u32 reg_addr;
2737 u32 val;
3fcaf2e5 2738 u32 aeu_mask;
a2fbb9ea
ET
2739
2740 /* need to take HW lock because MCP or other port might also
2741 try to handle this event */
4a37fb66 2742 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2743
2744 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2745 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2746 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2747 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2748 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2749 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2750
2751 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2752 if (deasserted & (1 << index)) {
2753 group_mask = bp->attn_group[index];
2754
34f80b04
EG
2755 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2756 index, group_mask.sig[0], group_mask.sig[1],
2757 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2758
877e9aa4
ET
2759 bnx2x_attn_int_deasserted3(bp,
2760 attn.sig[3] & group_mask.sig[3]);
2761 bnx2x_attn_int_deasserted1(bp,
2762 attn.sig[1] & group_mask.sig[1]);
2763 bnx2x_attn_int_deasserted2(bp,
2764 attn.sig[2] & group_mask.sig[2]);
2765 bnx2x_attn_int_deasserted0(bp,
2766 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2767
a2fbb9ea
ET
2768 if ((attn.sig[0] & group_mask.sig[0] &
2769 HW_PRTY_ASSERT_SET_0) ||
2770 (attn.sig[1] & group_mask.sig[1] &
2771 HW_PRTY_ASSERT_SET_1) ||
2772 (attn.sig[2] & group_mask.sig[2] &
2773 HW_PRTY_ASSERT_SET_2))
6378c025 2774 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2775 }
2776 }
2777
4a37fb66 2778 bnx2x_release_alr(bp);
a2fbb9ea 2779
5c862848 2780 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2781
2782 val = ~deasserted;
3fcaf2e5
EG
2783 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2784 val, reg_addr);
5c862848 2785 REG_WR(bp, reg_addr, val);
a2fbb9ea 2786
a2fbb9ea 2787 if (~bp->attn_state & deasserted)
3fcaf2e5 2788 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2789
2790 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2791 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2792
3fcaf2e5
EG
2793 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2794 aeu_mask = REG_RD(bp, reg_addr);
2795
2796 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2797 aeu_mask, deasserted);
2798 aeu_mask |= (deasserted & 0xff);
2799 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2800
3fcaf2e5
EG
2801 REG_WR(bp, reg_addr, aeu_mask);
2802 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2803
2804 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2805 bp->attn_state &= ~deasserted;
2806 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2807}
2808
2809static void bnx2x_attn_int(struct bnx2x *bp)
2810{
2811 /* read local copy of bits */
2812 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2813 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2814 u32 attn_state = bp->attn_state;
2815
2816 /* look for changed bits */
2817 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2818 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2819
2820 DP(NETIF_MSG_HW,
2821 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2822 attn_bits, attn_ack, asserted, deasserted);
2823
2824 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2825 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2826
2827 /* handle bits that were raised */
2828 if (asserted)
2829 bnx2x_attn_int_asserted(bp, asserted);
2830
2831 if (deasserted)
2832 bnx2x_attn_int_deasserted(bp, deasserted);
2833}
2834
2835static void bnx2x_sp_task(struct work_struct *work)
2836{
1cf167f2 2837 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2838 u16 status;
2839
34f80b04 2840
a2fbb9ea
ET
2841 /* Return here if interrupt is disabled */
2842 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2843 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2844 return;
2845 }
2846
2847 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2848/* if (status == 0) */
2849/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2850
3196a88a 2851 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2852
877e9aa4
ET
2853 /* HW attentions */
2854 if (status & 0x1)
a2fbb9ea 2855 bnx2x_attn_int(bp);
a2fbb9ea 2856
bb2a0f7a
YG
2857 /* CStorm events: query_stats, port delete ramrod */
2858 if (status & 0x2)
2859 bp->stats_pending = 0;
2860
a2fbb9ea
ET
2861 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2862 IGU_INT_NOP, 1);
2863 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2864 IGU_INT_NOP, 1);
2865 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2866 IGU_INT_NOP, 1);
2867 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2868 IGU_INT_NOP, 1);
2869 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2870 IGU_INT_ENABLE, 1);
877e9aa4 2871
a2fbb9ea
ET
2872}
2873
2874static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2875{
2876 struct net_device *dev = dev_instance;
2877 struct bnx2x *bp = netdev_priv(dev);
2878
2879 /* Return here if interrupt is disabled */
2880 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2881 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2882 return IRQ_HANDLED;
2883 }
2884
877e9aa4 2885 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2886
2887#ifdef BNX2X_STOP_ON_ERROR
2888 if (unlikely(bp->panic))
2889 return IRQ_HANDLED;
2890#endif
2891
1cf167f2 2892 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2893
2894 return IRQ_HANDLED;
2895}
2896
2897/* end of slow path */
2898
2899/* Statistics */
2900
2901/****************************************************************************
2902* Macros
2903****************************************************************************/
2904
a2fbb9ea
ET
2905/* sum[hi:lo] += add[hi:lo] */
2906#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2907 do { \
2908 s_lo += a_lo; \
2909 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2910 } while (0)
2911
2912/* difference = minuend - subtrahend */
2913#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2914 do { \
bb2a0f7a
YG
2915 if (m_lo < s_lo) { \
2916 /* underflow */ \
a2fbb9ea 2917 d_hi = m_hi - s_hi; \
bb2a0f7a 2918 if (d_hi > 0) { \
6378c025 2919 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2920 d_hi--; \
2921 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2922 } else { \
6378c025 2923 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2924 d_hi = 0; \
2925 d_lo = 0; \
2926 } \
bb2a0f7a
YG
2927 } else { \
2928 /* m_lo >= s_lo */ \
a2fbb9ea 2929 if (m_hi < s_hi) { \
bb2a0f7a
YG
2930 d_hi = 0; \
2931 d_lo = 0; \
2932 } else { \
6378c025 2933 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2934 d_hi = m_hi - s_hi; \
2935 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2936 } \
2937 } \
2938 } while (0)
2939
bb2a0f7a 2940#define UPDATE_STAT64(s, t) \
a2fbb9ea 2941 do { \
bb2a0f7a
YG
2942 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2943 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2944 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2945 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2946 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2947 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2948 } while (0)
2949
bb2a0f7a 2950#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2951 do { \
bb2a0f7a
YG
2952 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2953 diff.lo, new->s##_lo, old->s##_lo); \
2954 ADD_64(estats->t##_hi, diff.hi, \
2955 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2956 } while (0)
2957
2958/* sum[hi:lo] += add */
2959#define ADD_EXTEND_64(s_hi, s_lo, a) \
2960 do { \
2961 s_lo += a; \
2962 s_hi += (s_lo < a) ? 1 : 0; \
2963 } while (0)
2964
bb2a0f7a 2965#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2966 do { \
bb2a0f7a
YG
2967 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2968 pstats->mac_stx[1].s##_lo, \
2969 new->s); \
a2fbb9ea
ET
2970 } while (0)
2971
bb2a0f7a 2972#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2973 do { \
2974 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2975 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2976 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2977 } while (0)
2978
2979#define UPDATE_EXTEND_XSTAT(s, t) \
2980 do { \
2981 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2982 old_xclient->s = le32_to_cpu(xclient->s); \
2983 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2984 } while (0)
2985
2986/*
2987 * General service functions
2988 */
2989
2990static inline long bnx2x_hilo(u32 *hiref)
2991{
2992 u32 lo = *(hiref + 1);
2993#if (BITS_PER_LONG == 64)
2994 u32 hi = *hiref;
2995
2996 return HILO_U64(hi, lo);
2997#else
2998 return lo;
2999#endif
3000}
3001
3002/*
3003 * Init service functions
3004 */
3005
bb2a0f7a
YG
3006static void bnx2x_storm_stats_post(struct bnx2x *bp)
3007{
3008 if (!bp->stats_pending) {
3009 struct eth_query_ramrod_data ramrod_data = {0};
3010 int rc;
3011
3012 ramrod_data.drv_counter = bp->stats_counter++;
3013 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3014 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3015
3016 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3017 ((u32 *)&ramrod_data)[1],
3018 ((u32 *)&ramrod_data)[0], 0);
3019 if (rc == 0) {
3020 /* stats ramrod has it's own slot on the spq */
3021 bp->spq_left++;
3022 bp->stats_pending = 1;
3023 }
3024 }
3025}
3026
3027static void bnx2x_stats_init(struct bnx2x *bp)
3028{
3029 int port = BP_PORT(bp);
3030
3031 bp->executer_idx = 0;
3032 bp->stats_counter = 0;
3033
3034 /* port stats */
3035 if (!BP_NOMCP(bp))
3036 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3037 else
3038 bp->port.port_stx = 0;
3039 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3040
3041 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3042 bp->port.old_nig_stats.brb_discard =
3043 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3044 bp->port.old_nig_stats.brb_truncate =
3045 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3046 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3047 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3048 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3049 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3050
3051 /* function stats */
3052 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3053 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3054 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3055 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3056
3057 bp->stats_state = STATS_STATE_DISABLED;
3058 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3059 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3060}
3061
3062static void bnx2x_hw_stats_post(struct bnx2x *bp)
3063{
3064 struct dmae_command *dmae = &bp->stats_dmae;
3065 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3066
3067 *stats_comp = DMAE_COMP_VAL;
3068
3069 /* loader */
3070 if (bp->executer_idx) {
3071 int loader_idx = PMF_DMAE_C(bp);
3072
3073 memset(dmae, 0, sizeof(struct dmae_command));
3074
3075 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3076 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3077 DMAE_CMD_DST_RESET |
3078#ifdef __BIG_ENDIAN
3079 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3080#else
3081 DMAE_CMD_ENDIANITY_DW_SWAP |
3082#endif
3083 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3084 DMAE_CMD_PORT_0) |
3085 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3086 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3087 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3088 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3089 sizeof(struct dmae_command) *
3090 (loader_idx + 1)) >> 2;
3091 dmae->dst_addr_hi = 0;
3092 dmae->len = sizeof(struct dmae_command) >> 2;
3093 if (CHIP_IS_E1(bp))
3094 dmae->len--;
3095 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3096 dmae->comp_addr_hi = 0;
3097 dmae->comp_val = 1;
3098
3099 *stats_comp = 0;
3100 bnx2x_post_dmae(bp, dmae, loader_idx);
3101
3102 } else if (bp->func_stx) {
3103 *stats_comp = 0;
3104 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3105 }
3106}
3107
3108static int bnx2x_stats_comp(struct bnx2x *bp)
3109{
3110 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3111 int cnt = 10;
3112
3113 might_sleep();
3114 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3115 if (!cnt) {
3116 BNX2X_ERR("timeout waiting for stats finished\n");
3117 break;
3118 }
3119 cnt--;
12469401 3120 msleep(1);
bb2a0f7a
YG
3121 }
3122 return 1;
3123}
3124
3125/*
3126 * Statistics service functions
3127 */
3128
3129static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3130{
3131 struct dmae_command *dmae;
3132 u32 opcode;
3133 int loader_idx = PMF_DMAE_C(bp);
3134 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3135
3136 /* sanity */
3137 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3138 BNX2X_ERR("BUG!\n");
3139 return;
3140 }
3141
3142 bp->executer_idx = 0;
3143
3144 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3145 DMAE_CMD_C_ENABLE |
3146 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3147#ifdef __BIG_ENDIAN
3148 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3149#else
3150 DMAE_CMD_ENDIANITY_DW_SWAP |
3151#endif
3152 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3153 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3154
3155 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3156 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3157 dmae->src_addr_lo = bp->port.port_stx >> 2;
3158 dmae->src_addr_hi = 0;
3159 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3160 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3161 dmae->len = DMAE_LEN32_RD_MAX;
3162 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3163 dmae->comp_addr_hi = 0;
3164 dmae->comp_val = 1;
3165
3166 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3167 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3168 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3169 dmae->src_addr_hi = 0;
7a9b2557
VZ
3170 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3171 DMAE_LEN32_RD_MAX * 4);
3172 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3173 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3174 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3175 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3176 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3177 dmae->comp_val = DMAE_COMP_VAL;
3178
3179 *stats_comp = 0;
3180 bnx2x_hw_stats_post(bp);
3181 bnx2x_stats_comp(bp);
3182}
3183
3184static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3185{
3186 struct dmae_command *dmae;
34f80b04 3187 int port = BP_PORT(bp);
bb2a0f7a 3188 int vn = BP_E1HVN(bp);
a2fbb9ea 3189 u32 opcode;
bb2a0f7a 3190 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3191 u32 mac_addr;
bb2a0f7a
YG
3192 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194 /* sanity */
3195 if (!bp->link_vars.link_up || !bp->port.pmf) {
3196 BNX2X_ERR("BUG!\n");
3197 return;
3198 }
a2fbb9ea
ET
3199
3200 bp->executer_idx = 0;
bb2a0f7a
YG
3201
3202 /* MCP */
3203 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3204 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3205 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3206#ifdef __BIG_ENDIAN
bb2a0f7a 3207 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3208#else
bb2a0f7a 3209 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3210#endif
bb2a0f7a
YG
3211 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3212 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3213
bb2a0f7a 3214 if (bp->port.port_stx) {
a2fbb9ea
ET
3215
3216 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3217 dmae->opcode = opcode;
bb2a0f7a
YG
3218 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3219 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3220 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3221 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3222 dmae->len = sizeof(struct host_port_stats) >> 2;
3223 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3224 dmae->comp_addr_hi = 0;
3225 dmae->comp_val = 1;
a2fbb9ea
ET
3226 }
3227
bb2a0f7a
YG
3228 if (bp->func_stx) {
3229
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = opcode;
3232 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3233 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3234 dmae->dst_addr_lo = bp->func_stx >> 2;
3235 dmae->dst_addr_hi = 0;
3236 dmae->len = sizeof(struct host_func_stats) >> 2;
3237 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238 dmae->comp_addr_hi = 0;
3239 dmae->comp_val = 1;
a2fbb9ea
ET
3240 }
3241
bb2a0f7a 3242 /* MAC */
a2fbb9ea
ET
3243 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3244 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3245 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3246#ifdef __BIG_ENDIAN
3247 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3248#else
3249 DMAE_CMD_ENDIANITY_DW_SWAP |
3250#endif
bb2a0f7a
YG
3251 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3252 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3253
c18487ee 3254 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3255
3256 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3257 NIG_REG_INGRESS_BMAC0_MEM);
3258
3259 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3260 BIGMAC_REGISTER_TX_STAT_GTBYT */
3261 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3262 dmae->opcode = opcode;
3263 dmae->src_addr_lo = (mac_addr +
3264 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3265 dmae->src_addr_hi = 0;
3266 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3267 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3268 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3269 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3270 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3271 dmae->comp_addr_hi = 0;
3272 dmae->comp_val = 1;
3273
3274 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3275 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3276 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3277 dmae->opcode = opcode;
3278 dmae->src_addr_lo = (mac_addr +
3279 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3280 dmae->src_addr_hi = 0;
3281 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3282 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3283 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3284 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3285 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3286 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3287 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3288 dmae->comp_addr_hi = 0;
3289 dmae->comp_val = 1;
3290
c18487ee 3291 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3292
3293 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3294
3295 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3296 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297 dmae->opcode = opcode;
3298 dmae->src_addr_lo = (mac_addr +
3299 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3300 dmae->src_addr_hi = 0;
3301 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3302 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3303 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3304 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3305 dmae->comp_addr_hi = 0;
3306 dmae->comp_val = 1;
3307
3308 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310 dmae->opcode = opcode;
3311 dmae->src_addr_lo = (mac_addr +
3312 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3313 dmae->src_addr_hi = 0;
3314 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3315 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3317 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3318 dmae->len = 1;
3319 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3320 dmae->comp_addr_hi = 0;
3321 dmae->comp_val = 1;
3322
3323 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3324 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3325 dmae->opcode = opcode;
3326 dmae->src_addr_lo = (mac_addr +
3327 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3328 dmae->src_addr_hi = 0;
3329 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3330 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3331 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3332 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3333 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3334 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3335 dmae->comp_addr_hi = 0;
3336 dmae->comp_val = 1;
3337 }
3338
3339 /* NIG */
bb2a0f7a
YG
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3343 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3346 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3347 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3348 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349 dmae->comp_addr_hi = 0;
3350 dmae->comp_val = 1;
3351
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3355 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3358 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3360 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3361 dmae->len = (2*sizeof(u32)) >> 2;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3365
a2fbb9ea
ET
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3368 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3369 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3370#ifdef __BIG_ENDIAN
3371 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3372#else
3373 DMAE_CMD_ENDIANITY_DW_SWAP |
3374#endif
bb2a0f7a
YG
3375 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3376 (vn << DMAE_CMD_E1HVN_SHIFT));
3377 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3378 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3379 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3380 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3381 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3382 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3383 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3384 dmae->len = (2*sizeof(u32)) >> 2;
3385 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3386 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3387 dmae->comp_val = DMAE_COMP_VAL;
3388
3389 *stats_comp = 0;
a2fbb9ea
ET
3390}
3391
bb2a0f7a 3392static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3393{
bb2a0f7a
YG
3394 struct dmae_command *dmae = &bp->stats_dmae;
3395 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3396
bb2a0f7a
YG
3397 /* sanity */
3398 if (!bp->func_stx) {
3399 BNX2X_ERR("BUG!\n");
3400 return;
3401 }
a2fbb9ea 3402
bb2a0f7a
YG
3403 bp->executer_idx = 0;
3404 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3405
bb2a0f7a
YG
3406 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3407 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3408 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3409#ifdef __BIG_ENDIAN
3410 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3411#else
3412 DMAE_CMD_ENDIANITY_DW_SWAP |
3413#endif
3414 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3415 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3416 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3417 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3418 dmae->dst_addr_lo = bp->func_stx >> 2;
3419 dmae->dst_addr_hi = 0;
3420 dmae->len = sizeof(struct host_func_stats) >> 2;
3421 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3422 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3423 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3424
bb2a0f7a
YG
3425 *stats_comp = 0;
3426}
a2fbb9ea 3427
bb2a0f7a
YG
3428static void bnx2x_stats_start(struct bnx2x *bp)
3429{
3430 if (bp->port.pmf)
3431 bnx2x_port_stats_init(bp);
3432
3433 else if (bp->func_stx)
3434 bnx2x_func_stats_init(bp);
3435
3436 bnx2x_hw_stats_post(bp);
3437 bnx2x_storm_stats_post(bp);
3438}
3439
3440static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3441{
3442 bnx2x_stats_comp(bp);
3443 bnx2x_stats_pmf_update(bp);
3444 bnx2x_stats_start(bp);
3445}
3446
3447static void bnx2x_stats_restart(struct bnx2x *bp)
3448{
3449 bnx2x_stats_comp(bp);
3450 bnx2x_stats_start(bp);
3451}
3452
3453static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3454{
3455 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3456 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3457 struct regpair diff;
3458
3459 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3460 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3461 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3462 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3463 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3464 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3465 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3466 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3467 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3468 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3469 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3470 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3471 UPDATE_STAT64(tx_stat_gt127,
3472 tx_stat_etherstatspkts65octetsto127octets);
3473 UPDATE_STAT64(tx_stat_gt255,
3474 tx_stat_etherstatspkts128octetsto255octets);
3475 UPDATE_STAT64(tx_stat_gt511,
3476 tx_stat_etherstatspkts256octetsto511octets);
3477 UPDATE_STAT64(tx_stat_gt1023,
3478 tx_stat_etherstatspkts512octetsto1023octets);
3479 UPDATE_STAT64(tx_stat_gt1518,
3480 tx_stat_etherstatspkts1024octetsto1522octets);
3481 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3482 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3483 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3484 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3485 UPDATE_STAT64(tx_stat_gterr,
3486 tx_stat_dot3statsinternalmactransmiterrors);
3487 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3488}
3489
3490static void bnx2x_emac_stats_update(struct bnx2x *bp)
3491{
3492 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3493 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3494
3495 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3496 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3497 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3498 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3499 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3500 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3501 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3502 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3503 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3504 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3505 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3506 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3507 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3508 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3509 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3510 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3511 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3517 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3519 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3524 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3525 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3526}
3527
3528static int bnx2x_hw_stats_update(struct bnx2x *bp)
3529{
3530 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3531 struct nig_stats *old = &(bp->port.old_nig_stats);
3532 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3533 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3534 struct regpair diff;
3535
3536 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3537 bnx2x_bmac_stats_update(bp);
3538
3539 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3540 bnx2x_emac_stats_update(bp);
3541
3542 else { /* unreached */
3543 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3544 return -1;
3545 }
a2fbb9ea 3546
bb2a0f7a
YG
3547 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3548 new->brb_discard - old->brb_discard);
66e855f3
YG
3549 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3550 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3551
bb2a0f7a
YG
3552 UPDATE_STAT64_NIG(egress_mac_pkt0,
3553 etherstatspkts1024octetsto1522octets);
3554 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3555
bb2a0f7a 3556 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3557
bb2a0f7a
YG
3558 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3559 sizeof(struct mac_stx));
3560 estats->brb_drop_hi = pstats->brb_drop_hi;
3561 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3562
bb2a0f7a 3563 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3564
bb2a0f7a 3565 return 0;
a2fbb9ea
ET
3566}
3567
bb2a0f7a 3568static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3569{
3570 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3571 int cl_id = BP_CL_ID(bp);
3572 struct tstorm_per_port_stats *tport =
3573 &stats->tstorm_common.port_statistics;
a2fbb9ea 3574 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3575 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3576 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3577 struct xstorm_per_client_stats *xclient =
3578 &stats->xstorm_common.client_statistics[cl_id];
3579 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3580 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3581 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3582 u32 diff;
3583
bb2a0f7a
YG
3584 /* are storm stats valid? */
3585 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3586 bp->stats_counter) {
3587 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3588 " tstorm counter (%d) != stats_counter (%d)\n",
3589 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3590 return -1;
3591 }
bb2a0f7a
YG
3592 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3593 bp->stats_counter) {
3594 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3595 " xstorm counter (%d) != stats_counter (%d)\n",
3596 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3597 return -2;
3598 }
a2fbb9ea 3599
bb2a0f7a
YG
3600 fstats->total_bytes_received_hi =
3601 fstats->valid_bytes_received_hi =
a2fbb9ea 3602 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3603 fstats->total_bytes_received_lo =
3604 fstats->valid_bytes_received_lo =
a2fbb9ea 3605 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3606
3607 estats->error_bytes_received_hi =
3608 le32_to_cpu(tclient->rcv_error_bytes.hi);
3609 estats->error_bytes_received_lo =
3610 le32_to_cpu(tclient->rcv_error_bytes.lo);
3611 ADD_64(estats->error_bytes_received_hi,
3612 estats->rx_stat_ifhcinbadoctets_hi,
3613 estats->error_bytes_received_lo,
3614 estats->rx_stat_ifhcinbadoctets_lo);
3615
3616 ADD_64(fstats->total_bytes_received_hi,
3617 estats->error_bytes_received_hi,
3618 fstats->total_bytes_received_lo,
3619 estats->error_bytes_received_lo);
3620
3621 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3622 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3623 total_multicast_packets_received);
a2fbb9ea 3624 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3625 total_broadcast_packets_received);
3626
3627 fstats->total_bytes_transmitted_hi =
3628 le32_to_cpu(xclient->total_sent_bytes.hi);
3629 fstats->total_bytes_transmitted_lo =
3630 le32_to_cpu(xclient->total_sent_bytes.lo);
3631
3632 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3633 total_unicast_packets_transmitted);
3634 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3635 total_multicast_packets_transmitted);
3636 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3637 total_broadcast_packets_transmitted);
3638
3639 memcpy(estats, &(fstats->total_bytes_received_hi),
3640 sizeof(struct host_func_stats) - 2*sizeof(u32));
3641
3642 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3643 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3644 estats->brb_truncate_discard =
3645 le32_to_cpu(tport->brb_truncate_discard);
3646 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3647
3648 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3649 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3650 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3651 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3652 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3653 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3654 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3655 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3656 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3657 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3658 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3659 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3660 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3661
bb2a0f7a
YG
3662 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3663 old_tclient->packets_too_big_discard =
a2fbb9ea 3664 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3665 estats->no_buff_discard =
3666 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3667 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3668
3669 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3670 old_xclient->unicast_bytes_sent.hi =
3671 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3672 old_xclient->unicast_bytes_sent.lo =
3673 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3674 old_xclient->multicast_bytes_sent.hi =
3675 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3676 old_xclient->multicast_bytes_sent.lo =
3677 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3678 old_xclient->broadcast_bytes_sent.hi =
3679 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3680 old_xclient->broadcast_bytes_sent.lo =
3681 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3682
3683 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3684
3685 return 0;
3686}
3687
bb2a0f7a 3688static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3689{
bb2a0f7a
YG
3690 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3691 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3692 struct net_device_stats *nstats = &bp->dev->stats;
3693
3694 nstats->rx_packets =
3695 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3696 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3697 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3698
3699 nstats->tx_packets =
3700 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3701 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3702 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3703
bb2a0f7a 3704 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3705
0e39e645 3706 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3707
bb2a0f7a
YG
3708 nstats->rx_dropped = old_tclient->checksum_discard +
3709 estats->mac_discard;
a2fbb9ea
ET
3710 nstats->tx_dropped = 0;
3711
3712 nstats->multicast =
3713 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3714
bb2a0f7a
YG
3715 nstats->collisions =
3716 estats->tx_stat_dot3statssinglecollisionframes_lo +
3717 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3718 estats->tx_stat_dot3statslatecollisions_lo +
3719 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3720
bb2a0f7a
YG
3721 estats->jabber_packets_received =
3722 old_tclient->packets_too_big_discard +
3723 estats->rx_stat_dot3statsframestoolong_lo;
3724
3725 nstats->rx_length_errors =
3726 estats->rx_stat_etherstatsundersizepkts_lo +
3727 estats->jabber_packets_received;
66e855f3 3728 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3729 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3730 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3731 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3732 nstats->rx_missed_errors = estats->xxoverflow_discard;
3733
3734 nstats->rx_errors = nstats->rx_length_errors +
3735 nstats->rx_over_errors +
3736 nstats->rx_crc_errors +
3737 nstats->rx_frame_errors +
0e39e645
ET
3738 nstats->rx_fifo_errors +
3739 nstats->rx_missed_errors;
a2fbb9ea 3740
bb2a0f7a
YG
3741 nstats->tx_aborted_errors =
3742 estats->tx_stat_dot3statslatecollisions_lo +
3743 estats->tx_stat_dot3statsexcessivecollisions_lo;
3744 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3745 nstats->tx_fifo_errors = 0;
3746 nstats->tx_heartbeat_errors = 0;
3747 nstats->tx_window_errors = 0;
3748
3749 nstats->tx_errors = nstats->tx_aborted_errors +
3750 nstats->tx_carrier_errors;
a2fbb9ea
ET
3751}
3752
bb2a0f7a 3753static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3754{
bb2a0f7a
YG
3755 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3756 int update = 0;
a2fbb9ea 3757
bb2a0f7a
YG
3758 if (*stats_comp != DMAE_COMP_VAL)
3759 return;
3760
3761 if (bp->port.pmf)
3762 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3763
bb2a0f7a 3764 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3765
bb2a0f7a
YG
3766 if (update)
3767 bnx2x_net_stats_update(bp);
a2fbb9ea 3768
bb2a0f7a
YG
3769 else {
3770 if (bp->stats_pending) {
3771 bp->stats_pending++;
3772 if (bp->stats_pending == 3) {
3773 BNX2X_ERR("stats not updated for 3 times\n");
3774 bnx2x_panic();
3775 return;
3776 }
3777 }
a2fbb9ea
ET
3778 }
3779
3780 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3781 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3782 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3783 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3784 int i;
a2fbb9ea
ET
3785
3786 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3787 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3788 " tx pkt (%lx)\n",
3789 bnx2x_tx_avail(bp->fp),
7a9b2557 3790 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3791 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3792 " rx pkt (%lx)\n",
7a9b2557
VZ
3793 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3794 bp->fp->rx_comp_cons),
3795 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3796 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3797 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3798 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3799 printk(KERN_DEBUG "tstats: checksum_discard %u "
3800 "packets_too_big_discard %u no_buff_discard %u "
3801 "mac_discard %u mac_filter_discard %u "
3802 "xxovrflow_discard %u brb_truncate_discard %u "
3803 "ttl0_discard %u\n",
bb2a0f7a
YG
3804 old_tclient->checksum_discard,
3805 old_tclient->packets_too_big_discard,
3806 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3807 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3808 estats->brb_truncate_discard,
3809 old_tclient->ttl0_discard);
a2fbb9ea
ET
3810
3811 for_each_queue(bp, i) {
3812 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3813 bnx2x_fp(bp, i, tx_pkt),
3814 bnx2x_fp(bp, i, rx_pkt),
3815 bnx2x_fp(bp, i, rx_calls));
3816 }
3817 }
3818
bb2a0f7a
YG
3819 bnx2x_hw_stats_post(bp);
3820 bnx2x_storm_stats_post(bp);
3821}
a2fbb9ea 3822
bb2a0f7a
YG
3823static void bnx2x_port_stats_stop(struct bnx2x *bp)
3824{
3825 struct dmae_command *dmae;
3826 u32 opcode;
3827 int loader_idx = PMF_DMAE_C(bp);
3828 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3829
bb2a0f7a 3830 bp->executer_idx = 0;
a2fbb9ea 3831
bb2a0f7a
YG
3832 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3833 DMAE_CMD_C_ENABLE |
3834 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3835#ifdef __BIG_ENDIAN
bb2a0f7a 3836 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3837#else
bb2a0f7a 3838 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3839#endif
bb2a0f7a
YG
3840 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3841 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3842
3843 if (bp->port.port_stx) {
3844
3845 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3846 if (bp->func_stx)
3847 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3848 else
3849 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3850 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3851 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3852 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3853 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3854 dmae->len = sizeof(struct host_port_stats) >> 2;
3855 if (bp->func_stx) {
3856 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3857 dmae->comp_addr_hi = 0;
3858 dmae->comp_val = 1;
3859 } else {
3860 dmae->comp_addr_lo =
3861 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3862 dmae->comp_addr_hi =
3863 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3864 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3865
bb2a0f7a
YG
3866 *stats_comp = 0;
3867 }
a2fbb9ea
ET
3868 }
3869
bb2a0f7a
YG
3870 if (bp->func_stx) {
3871
3872 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3873 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3874 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3875 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3876 dmae->dst_addr_lo = bp->func_stx >> 2;
3877 dmae->dst_addr_hi = 0;
3878 dmae->len = sizeof(struct host_func_stats) >> 2;
3879 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3880 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3881 dmae->comp_val = DMAE_COMP_VAL;
3882
3883 *stats_comp = 0;
a2fbb9ea 3884 }
bb2a0f7a
YG
3885}
3886
3887static void bnx2x_stats_stop(struct bnx2x *bp)
3888{
3889 int update = 0;
3890
3891 bnx2x_stats_comp(bp);
3892
3893 if (bp->port.pmf)
3894 update = (bnx2x_hw_stats_update(bp) == 0);
3895
3896 update |= (bnx2x_storm_stats_update(bp) == 0);
3897
3898 if (update) {
3899 bnx2x_net_stats_update(bp);
a2fbb9ea 3900
bb2a0f7a
YG
3901 if (bp->port.pmf)
3902 bnx2x_port_stats_stop(bp);
3903
3904 bnx2x_hw_stats_post(bp);
3905 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3906 }
3907}
3908
bb2a0f7a
YG
3909static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3910{
3911}
3912
3913static const struct {
3914 void (*action)(struct bnx2x *bp);
3915 enum bnx2x_stats_state next_state;
3916} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3917/* state event */
3918{
3919/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3920/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3921/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3922/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3923},
3924{
3925/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3926/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3927/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3928/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3929}
3930};
3931
3932static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3933{
3934 enum bnx2x_stats_state state = bp->stats_state;
3935
3936 bnx2x_stats_stm[state][event].action(bp);
3937 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3938
3939 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3940 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3941 state, event, bp->stats_state);
3942}
3943
a2fbb9ea
ET
3944static void bnx2x_timer(unsigned long data)
3945{
3946 struct bnx2x *bp = (struct bnx2x *) data;
3947
3948 if (!netif_running(bp->dev))
3949 return;
3950
3951 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3952 goto timer_restart;
a2fbb9ea
ET
3953
3954 if (poll) {
3955 struct bnx2x_fastpath *fp = &bp->fp[0];
3956 int rc;
3957
3958 bnx2x_tx_int(fp, 1000);
3959 rc = bnx2x_rx_int(fp, 1000);
3960 }
3961
34f80b04
EG
3962 if (!BP_NOMCP(bp)) {
3963 int func = BP_FUNC(bp);
a2fbb9ea
ET
3964 u32 drv_pulse;
3965 u32 mcp_pulse;
3966
3967 ++bp->fw_drv_pulse_wr_seq;
3968 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3969 /* TBD - add SYSTEM_TIME */
3970 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3971 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3972
34f80b04 3973 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3974 MCP_PULSE_SEQ_MASK);
3975 /* The delta between driver pulse and mcp response
3976 * should be 1 (before mcp response) or 0 (after mcp response)
3977 */
3978 if ((drv_pulse != mcp_pulse) &&
3979 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3980 /* someone lost a heartbeat... */
3981 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3982 drv_pulse, mcp_pulse);
3983 }
3984 }
3985
bb2a0f7a
YG
3986 if ((bp->state == BNX2X_STATE_OPEN) ||
3987 (bp->state == BNX2X_STATE_DISABLED))
3988 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3989
f1410647 3990timer_restart:
a2fbb9ea
ET
3991 mod_timer(&bp->timer, jiffies + bp->current_interval);
3992}
3993
3994/* end of Statistics */
3995
3996/* nic init */
3997
3998/*
3999 * nic init service functions
4000 */
4001
34f80b04 4002static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4003{
34f80b04
EG
4004 int port = BP_PORT(bp);
4005
4006 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4007 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4008 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4009 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4010 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4011 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4012}
4013
5c862848
EG
4014static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4015 dma_addr_t mapping, int sb_id)
34f80b04
EG
4016{
4017 int port = BP_PORT(bp);
bb2a0f7a 4018 int func = BP_FUNC(bp);
a2fbb9ea 4019 int index;
34f80b04 4020 u64 section;
a2fbb9ea
ET
4021
4022 /* USTORM */
4023 section = ((u64)mapping) + offsetof(struct host_status_block,
4024 u_status_block);
34f80b04 4025 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4026
4027 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4028 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4029 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4030 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4031 U64_HI(section));
bb2a0f7a
YG
4032 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4033 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4034
4035 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4036 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4037 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4038
4039 /* CSTORM */
4040 section = ((u64)mapping) + offsetof(struct host_status_block,
4041 c_status_block);
34f80b04 4042 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4043
4044 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4045 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4046 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4047 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4048 U64_HI(section));
7a9b2557
VZ
4049 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4050 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4051
4052 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4053 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4054 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4055
4056 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4057}
4058
4059static void bnx2x_zero_def_sb(struct bnx2x *bp)
4060{
4061 int func = BP_FUNC(bp);
a2fbb9ea 4062
34f80b04
EG
4063 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4064 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4065 sizeof(struct ustorm_def_status_block)/4);
4066 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4067 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4068 sizeof(struct cstorm_def_status_block)/4);
4069 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4070 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4071 sizeof(struct xstorm_def_status_block)/4);
4072 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4073 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4074 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4075}
4076
4077static void bnx2x_init_def_sb(struct bnx2x *bp,
4078 struct host_def_status_block *def_sb,
34f80b04 4079 dma_addr_t mapping, int sb_id)
a2fbb9ea 4080{
34f80b04
EG
4081 int port = BP_PORT(bp);
4082 int func = BP_FUNC(bp);
a2fbb9ea
ET
4083 int index, val, reg_offset;
4084 u64 section;
4085
4086 /* ATTN */
4087 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4088 atten_status_block);
34f80b04 4089 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4090
49d66772
ET
4091 bp->attn_state = 0;
4092
a2fbb9ea
ET
4093 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4094 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4095
34f80b04 4096 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4097 bp->attn_group[index].sig[0] = REG_RD(bp,
4098 reg_offset + 0x10*index);
4099 bp->attn_group[index].sig[1] = REG_RD(bp,
4100 reg_offset + 0x4 + 0x10*index);
4101 bp->attn_group[index].sig[2] = REG_RD(bp,
4102 reg_offset + 0x8 + 0x10*index);
4103 bp->attn_group[index].sig[3] = REG_RD(bp,
4104 reg_offset + 0xc + 0x10*index);
4105 }
4106
a2fbb9ea
ET
4107 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4108 HC_REG_ATTN_MSG0_ADDR_L);
4109
4110 REG_WR(bp, reg_offset, U64_LO(section));
4111 REG_WR(bp, reg_offset + 4, U64_HI(section));
4112
4113 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4114
4115 val = REG_RD(bp, reg_offset);
34f80b04 4116 val |= sb_id;
a2fbb9ea
ET
4117 REG_WR(bp, reg_offset, val);
4118
4119 /* USTORM */
4120 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4121 u_def_status_block);
34f80b04 4122 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4123
4124 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4125 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4126 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4127 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4128 U64_HI(section));
5c862848 4129 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4130 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4131
4132 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4133 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4134 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4135
4136 /* CSTORM */
4137 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4138 c_def_status_block);
34f80b04 4139 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4140
4141 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4142 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4143 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4144 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4145 U64_HI(section));
5c862848 4146 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4147 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4148
4149 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4150 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4151 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4152
4153 /* TSTORM */
4154 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4155 t_def_status_block);
34f80b04 4156 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4157
4158 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4159 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4160 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4161 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4162 U64_HI(section));
5c862848 4163 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4164 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4165
4166 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4167 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4168 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4169
4170 /* XSTORM */
4171 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4172 x_def_status_block);
34f80b04 4173 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4174
4175 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4176 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4177 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4178 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4179 U64_HI(section));
5c862848 4180 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4181 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4182
4183 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4184 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4185 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4186
bb2a0f7a 4187 bp->stats_pending = 0;
66e855f3 4188 bp->set_mac_pending = 0;
bb2a0f7a 4189
34f80b04 4190 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4191}
4192
4193static void bnx2x_update_coalesce(struct bnx2x *bp)
4194{
34f80b04 4195 int port = BP_PORT(bp);
a2fbb9ea
ET
4196 int i;
4197
4198 for_each_queue(bp, i) {
34f80b04 4199 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4200
4201 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4202 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4203 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4204 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4205 bp->rx_ticks/12);
a2fbb9ea 4206 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4207 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4208 U_SB_ETH_RX_CQ_INDEX),
4209 bp->rx_ticks ? 0 : 1);
4210 REG_WR16(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4212 U_SB_ETH_RX_BD_INDEX),
34f80b04 4213 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4214
4215 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4216 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4217 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4218 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4219 bp->tx_ticks/12);
a2fbb9ea 4220 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4221 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4222 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4223 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4224 }
4225}
4226
7a9b2557
VZ
4227static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4228 struct bnx2x_fastpath *fp, int last)
4229{
4230 int i;
4231
4232 for (i = 0; i < last; i++) {
4233 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4234 struct sk_buff *skb = rx_buf->skb;
4235
4236 if (skb == NULL) {
4237 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4238 continue;
4239 }
4240
4241 if (fp->tpa_state[i] == BNX2X_TPA_START)
4242 pci_unmap_single(bp->pdev,
4243 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4244 bp->rx_buf_size,
7a9b2557
VZ
4245 PCI_DMA_FROMDEVICE);
4246
4247 dev_kfree_skb(skb);
4248 rx_buf->skb = NULL;
4249 }
4250}
4251
a2fbb9ea
ET
4252static void bnx2x_init_rx_rings(struct bnx2x *bp)
4253{
7a9b2557 4254 int func = BP_FUNC(bp);
32626230
EG
4255 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4256 ETH_MAX_AGGREGATION_QUEUES_E1H;
4257 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4258 int i, j;
a2fbb9ea 4259
437cf2f1
EG
4260 bp->rx_buf_size = bp->dev->mtu;
4261 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4262 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4263
7a9b2557
VZ
4264 if (bp->flags & TPA_ENABLE_FLAG) {
4265 DP(NETIF_MSG_IFUP,
437cf2f1
EG
4266 "rx_buf_size %d effective_mtu %d\n",
4267 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
7a9b2557
VZ
4268
4269 for_each_queue(bp, j) {
32626230 4270 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4271
32626230 4272 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4273 fp->tpa_pool[i].skb =
4274 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4275 if (!fp->tpa_pool[i].skb) {
4276 BNX2X_ERR("Failed to allocate TPA "
4277 "skb pool for queue[%d] - "
4278 "disabling TPA on this "
4279 "queue!\n", j);
4280 bnx2x_free_tpa_pool(bp, fp, i);
4281 fp->disable_tpa = 1;
4282 break;
4283 }
4284 pci_unmap_addr_set((struct sw_rx_bd *)
4285 &bp->fp->tpa_pool[i],
4286 mapping, 0);
4287 fp->tpa_state[i] = BNX2X_TPA_STOP;
4288 }
4289 }
4290 }
4291
a2fbb9ea
ET
4292 for_each_queue(bp, j) {
4293 struct bnx2x_fastpath *fp = &bp->fp[j];
4294
4295 fp->rx_bd_cons = 0;
4296 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4297 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4298
4299 /* "next page" elements initialization */
4300 /* SGE ring */
4301 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4302 struct eth_rx_sge *sge;
4303
4304 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4305 sge->addr_hi =
4306 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4307 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4308 sge->addr_lo =
4309 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4310 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4311 }
4312
4313 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4314
7a9b2557 4315 /* RX BD ring */
a2fbb9ea
ET
4316 for (i = 1; i <= NUM_RX_RINGS; i++) {
4317 struct eth_rx_bd *rx_bd;
4318
4319 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4320 rx_bd->addr_hi =
4321 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4322 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4323 rx_bd->addr_lo =
4324 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4325 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4326 }
4327
34f80b04 4328 /* CQ ring */
a2fbb9ea
ET
4329 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4330 struct eth_rx_cqe_next_page *nextpg;
4331
4332 nextpg = (struct eth_rx_cqe_next_page *)
4333 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4334 nextpg->addr_hi =
4335 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4336 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4337 nextpg->addr_lo =
4338 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4339 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4340 }
4341
7a9b2557
VZ
4342 /* Allocate SGEs and initialize the ring elements */
4343 for (i = 0, ring_prod = 0;
4344 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4345
7a9b2557
VZ
4346 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4347 BNX2X_ERR("was only able to allocate "
4348 "%d rx sges\n", i);
4349 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4350 /* Cleanup already allocated elements */
4351 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4352 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4353 fp->disable_tpa = 1;
4354 ring_prod = 0;
4355 break;
4356 }
4357 ring_prod = NEXT_SGE_IDX(ring_prod);
4358 }
4359 fp->rx_sge_prod = ring_prod;
4360
4361 /* Allocate BDs and initialize BD ring */
66e855f3 4362 fp->rx_comp_cons = 0;
7a9b2557 4363 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4364 for (i = 0; i < bp->rx_ring_size; i++) {
4365 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4366 BNX2X_ERR("was only able to allocate "
4367 "%d rx skbs\n", i);
66e855f3 4368 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4369 break;
4370 }
4371 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4372 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4373 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4374 }
4375
7a9b2557
VZ
4376 fp->rx_bd_prod = ring_prod;
4377 /* must not have more available CQEs than BDs */
4378 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4379 cqe_ring_prod);
a2fbb9ea
ET
4380 fp->rx_pkt = fp->rx_calls = 0;
4381
7a9b2557
VZ
4382 /* Warning!
4383 * this will generate an interrupt (to the TSTORM)
4384 * must only be done after chip is initialized
4385 */
4386 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4387 fp->rx_sge_prod);
a2fbb9ea
ET
4388 if (j != 0)
4389 continue;
4390
4391 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4392 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4393 U64_LO(fp->rx_comp_mapping));
4394 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4395 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4396 U64_HI(fp->rx_comp_mapping));
4397 }
4398}
4399
4400static void bnx2x_init_tx_ring(struct bnx2x *bp)
4401{
4402 int i, j;
4403
4404 for_each_queue(bp, j) {
4405 struct bnx2x_fastpath *fp = &bp->fp[j];
4406
4407 for (i = 1; i <= NUM_TX_RINGS; i++) {
4408 struct eth_tx_bd *tx_bd =
4409 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4410
4411 tx_bd->addr_hi =
4412 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4413 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4414 tx_bd->addr_lo =
4415 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4416 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4417 }
4418
4419 fp->tx_pkt_prod = 0;
4420 fp->tx_pkt_cons = 0;
4421 fp->tx_bd_prod = 0;
4422 fp->tx_bd_cons = 0;
4423 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4424 fp->tx_pkt = 0;
4425 }
4426}
4427
4428static void bnx2x_init_sp_ring(struct bnx2x *bp)
4429{
34f80b04 4430 int func = BP_FUNC(bp);
a2fbb9ea
ET
4431
4432 spin_lock_init(&bp->spq_lock);
4433
4434 bp->spq_left = MAX_SPQ_PENDING;
4435 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4436 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4437 bp->spq_prod_bd = bp->spq;
4438 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4439
34f80b04 4440 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4441 U64_LO(bp->spq_mapping));
34f80b04
EG
4442 REG_WR(bp,
4443 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4444 U64_HI(bp->spq_mapping));
4445
34f80b04 4446 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4447 bp->spq_prod_idx);
4448}
4449
4450static void bnx2x_init_context(struct bnx2x *bp)
4451{
4452 int i;
4453
4454 for_each_queue(bp, i) {
4455 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4456 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4457 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4458
4459 context->xstorm_st_context.tx_bd_page_base_hi =
4460 U64_HI(fp->tx_desc_mapping);
4461 context->xstorm_st_context.tx_bd_page_base_lo =
4462 U64_LO(fp->tx_desc_mapping);
4463 context->xstorm_st_context.db_data_addr_hi =
4464 U64_HI(fp->tx_prods_mapping);
4465 context->xstorm_st_context.db_data_addr_lo =
4466 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4467 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4468 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4469
4470 context->ustorm_st_context.common.sb_index_numbers =
4471 BNX2X_RX_SB_INDEX_NUM;
4472 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4473 context->ustorm_st_context.common.status_block_id = sb_id;
4474 context->ustorm_st_context.common.flags =
4475 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
437cf2f1
EG
4476 context->ustorm_st_context.common.mc_alignment_size =
4477 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4478 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4479 bp->rx_buf_size;
34f80b04 4480 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4481 U64_HI(fp->rx_desc_mapping);
34f80b04 4482 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4483 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4484 if (!fp->disable_tpa) {
4485 context->ustorm_st_context.common.flags |=
4486 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4487 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4488 context->ustorm_st_context.common.sge_buff_size =
4489 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4490 context->ustorm_st_context.common.sge_page_base_hi =
4491 U64_HI(fp->rx_sge_mapping);
4492 context->ustorm_st_context.common.sge_page_base_lo =
4493 U64_LO(fp->rx_sge_mapping);
4494 }
4495
a2fbb9ea 4496 context->cstorm_st_context.sb_index_number =
5c862848 4497 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4498 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4499
4500 context->xstorm_ag_context.cdu_reserved =
4501 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4502 CDU_REGION_NUMBER_XCM_AG,
4503 ETH_CONNECTION_TYPE);
4504 context->ustorm_ag_context.cdu_usage =
4505 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4506 CDU_REGION_NUMBER_UCM_AG,
4507 ETH_CONNECTION_TYPE);
4508 }
4509}
4510
4511static void bnx2x_init_ind_table(struct bnx2x *bp)
4512{
34f80b04 4513 int port = BP_PORT(bp);
a2fbb9ea
ET
4514 int i;
4515
4516 if (!is_multi(bp))
4517 return;
4518
34f80b04 4519 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4520 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4521 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4522 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4523 i % bp->num_queues);
4524
4525 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4526}
4527
49d66772
ET
4528static void bnx2x_set_client_config(struct bnx2x *bp)
4529{
49d66772 4530 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4531 int port = BP_PORT(bp);
4532 int i;
49d66772 4533
34f80b04 4534 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4535 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4536 tstorm_client.config_flags =
4537 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4538#ifdef BCM_VLAN
34f80b04 4539 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4540 tstorm_client.config_flags |=
4541 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4542 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4543 }
4544#endif
49d66772 4545
7a9b2557
VZ
4546 if (bp->flags & TPA_ENABLE_FLAG) {
4547 tstorm_client.max_sges_for_packet =
4f40f2cb 4548 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4549 tstorm_client.max_sges_for_packet =
4550 ((tstorm_client.max_sges_for_packet +
4551 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4552 PAGES_PER_SGE_SHIFT;
4553
4554 tstorm_client.config_flags |=
4555 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4556 }
4557
49d66772
ET
4558 for_each_queue(bp, i) {
4559 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4560 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4561 ((u32 *)&tstorm_client)[0]);
4562 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4563 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4564 ((u32 *)&tstorm_client)[1]);
4565 }
4566
34f80b04
EG
4567 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4568 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4569}
4570
a2fbb9ea
ET
4571static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4572{
a2fbb9ea 4573 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4574 int mode = bp->rx_mode;
4575 int mask = (1 << BP_L_ID(bp));
4576 int func = BP_FUNC(bp);
a2fbb9ea
ET
4577 int i;
4578
3196a88a 4579 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4580
4581 switch (mode) {
4582 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4583 tstorm_mac_filter.ucast_drop_all = mask;
4584 tstorm_mac_filter.mcast_drop_all = mask;
4585 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4586 break;
4587 case BNX2X_RX_MODE_NORMAL:
34f80b04 4588 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4589 break;
4590 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4591 tstorm_mac_filter.mcast_accept_all = mask;
4592 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4593 break;
4594 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4595 tstorm_mac_filter.ucast_accept_all = mask;
4596 tstorm_mac_filter.mcast_accept_all = mask;
4597 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4598 break;
4599 default:
34f80b04
EG
4600 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4601 break;
a2fbb9ea
ET
4602 }
4603
4604 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4605 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4606 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4607 ((u32 *)&tstorm_mac_filter)[i]);
4608
34f80b04 4609/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4610 ((u32 *)&tstorm_mac_filter)[i]); */
4611 }
a2fbb9ea 4612
49d66772
ET
4613 if (mode != BNX2X_RX_MODE_NONE)
4614 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4615}
4616
471de716
EG
4617static void bnx2x_init_internal_common(struct bnx2x *bp)
4618{
4619 int i;
4620
3cdf1db7
YG
4621 if (bp->flags & TPA_ENABLE_FLAG) {
4622 struct tstorm_eth_tpa_exist tpa = {0};
4623
4624 tpa.tpa_exist = 1;
4625
4626 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4627 ((u32 *)&tpa)[0]);
4628 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4629 ((u32 *)&tpa)[1]);
4630 }
4631
471de716
EG
4632 /* Zero this manually as its initialization is
4633 currently missing in the initTool */
4634 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4635 REG_WR(bp, BAR_USTRORM_INTMEM +
4636 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4637}
4638
4639static void bnx2x_init_internal_port(struct bnx2x *bp)
4640{
4641 int port = BP_PORT(bp);
4642
4643 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4644 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4645 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4646 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4647}
4648
4649static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4650{
a2fbb9ea
ET
4651 struct tstorm_eth_function_common_config tstorm_config = {0};
4652 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4653 int port = BP_PORT(bp);
4654 int func = BP_FUNC(bp);
4655 int i;
471de716 4656 u16 max_agg_size;
a2fbb9ea
ET
4657
4658 if (is_multi(bp)) {
4659 tstorm_config.config_flags = MULTI_FLAGS;
4660 tstorm_config.rss_result_mask = MULTI_MASK;
4661 }
4662
34f80b04
EG
4663 tstorm_config.leading_client_id = BP_L_ID(bp);
4664
a2fbb9ea 4665 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4666 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4667 (*(u32 *)&tstorm_config));
4668
c14423fe 4669 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4670 bnx2x_set_storm_rx_mode(bp);
4671
66e855f3
YG
4672 /* reset xstorm per client statistics */
4673 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4674 REG_WR(bp, BAR_XSTRORM_INTMEM +
4675 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4676 i*4, 0);
4677 }
4678 /* reset tstorm per client statistics */
4679 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4680 REG_WR(bp, BAR_TSTRORM_INTMEM +
4681 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4682 i*4, 0);
4683 }
4684
4685 /* Init statistics related context */
34f80b04 4686 stats_flags.collect_eth = 1;
a2fbb9ea 4687
66e855f3 4688 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4689 ((u32 *)&stats_flags)[0]);
66e855f3 4690 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4691 ((u32 *)&stats_flags)[1]);
4692
66e855f3 4693 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4694 ((u32 *)&stats_flags)[0]);
66e855f3 4695 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4696 ((u32 *)&stats_flags)[1]);
4697
66e855f3 4698 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4699 ((u32 *)&stats_flags)[0]);
66e855f3 4700 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4701 ((u32 *)&stats_flags)[1]);
4702
66e855f3
YG
4703 REG_WR(bp, BAR_XSTRORM_INTMEM +
4704 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4705 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4706 REG_WR(bp, BAR_XSTRORM_INTMEM +
4707 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4708 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4709
4710 REG_WR(bp, BAR_TSTRORM_INTMEM +
4711 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4712 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4713 REG_WR(bp, BAR_TSTRORM_INTMEM +
4714 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4715 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4716
4717 if (CHIP_IS_E1H(bp)) {
4718 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4719 IS_E1HMF(bp));
4720 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4721 IS_E1HMF(bp));
4722 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4723 IS_E1HMF(bp));
4724 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4725 IS_E1HMF(bp));
4726
7a9b2557
VZ
4727 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4728 bp->e1hov);
34f80b04
EG
4729 }
4730
4f40f2cb
EG
4731 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4732 max_agg_size =
4733 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4734 SGE_PAGE_SIZE * PAGES_PER_SGE),
4735 (u32)0xffff);
7a9b2557
VZ
4736 for_each_queue(bp, i) {
4737 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4738
4739 REG_WR(bp, BAR_USTRORM_INTMEM +
4740 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4741 U64_LO(fp->rx_comp_mapping));
4742 REG_WR(bp, BAR_USTRORM_INTMEM +
4743 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4744 U64_HI(fp->rx_comp_mapping));
4745
7a9b2557
VZ
4746 REG_WR16(bp, BAR_USTRORM_INTMEM +
4747 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4748 max_agg_size);
4749 }
a2fbb9ea
ET
4750}
4751
471de716
EG
4752static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4753{
4754 switch (load_code) {
4755 case FW_MSG_CODE_DRV_LOAD_COMMON:
4756 bnx2x_init_internal_common(bp);
4757 /* no break */
4758
4759 case FW_MSG_CODE_DRV_LOAD_PORT:
4760 bnx2x_init_internal_port(bp);
4761 /* no break */
4762
4763 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4764 bnx2x_init_internal_func(bp);
4765 break;
4766
4767 default:
4768 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4769 break;
4770 }
4771}
4772
4773static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4774{
4775 int i;
4776
4777 for_each_queue(bp, i) {
4778 struct bnx2x_fastpath *fp = &bp->fp[i];
4779
34f80b04 4780 fp->bp = bp;
a2fbb9ea 4781 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4782 fp->index = i;
34f80b04
EG
4783 fp->cl_id = BP_L_ID(bp) + i;
4784 fp->sb_id = fp->cl_id;
4785 DP(NETIF_MSG_IFUP,
4786 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4787 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4788 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4789 FP_SB_ID(fp));
4790 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4791 }
4792
5c862848
EG
4793 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4794 DEF_SB_ID);
4795 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4796 bnx2x_update_coalesce(bp);
4797 bnx2x_init_rx_rings(bp);
4798 bnx2x_init_tx_ring(bp);
4799 bnx2x_init_sp_ring(bp);
4800 bnx2x_init_context(bp);
471de716 4801 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4802 bnx2x_init_ind_table(bp);
615f8fd9 4803 bnx2x_int_enable(bp);
a2fbb9ea
ET
4804}
4805
4806/* end of nic init */
4807
4808/*
4809 * gzip service functions
4810 */
4811
4812static int bnx2x_gunzip_init(struct bnx2x *bp)
4813{
4814 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4815 &bp->gunzip_mapping);
4816 if (bp->gunzip_buf == NULL)
4817 goto gunzip_nomem1;
4818
4819 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4820 if (bp->strm == NULL)
4821 goto gunzip_nomem2;
4822
4823 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4824 GFP_KERNEL);
4825 if (bp->strm->workspace == NULL)
4826 goto gunzip_nomem3;
4827
4828 return 0;
4829
4830gunzip_nomem3:
4831 kfree(bp->strm);
4832 bp->strm = NULL;
4833
4834gunzip_nomem2:
4835 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4836 bp->gunzip_mapping);
4837 bp->gunzip_buf = NULL;
4838
4839gunzip_nomem1:
4840 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4841 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4842 return -ENOMEM;
4843}
4844
4845static void bnx2x_gunzip_end(struct bnx2x *bp)
4846{
4847 kfree(bp->strm->workspace);
4848
4849 kfree(bp->strm);
4850 bp->strm = NULL;
4851
4852 if (bp->gunzip_buf) {
4853 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4854 bp->gunzip_mapping);
4855 bp->gunzip_buf = NULL;
4856 }
4857}
4858
4859static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4860{
4861 int n, rc;
4862
4863 /* check gzip header */
4864 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4865 return -EINVAL;
4866
4867 n = 10;
4868
34f80b04 4869#define FNAME 0x8
a2fbb9ea
ET
4870
4871 if (zbuf[3] & FNAME)
4872 while ((zbuf[n++] != 0) && (n < len));
4873
4874 bp->strm->next_in = zbuf + n;
4875 bp->strm->avail_in = len - n;
4876 bp->strm->next_out = bp->gunzip_buf;
4877 bp->strm->avail_out = FW_BUF_SIZE;
4878
4879 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4880 if (rc != Z_OK)
4881 return rc;
4882
4883 rc = zlib_inflate(bp->strm, Z_FINISH);
4884 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4885 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4886 bp->dev->name, bp->strm->msg);
4887
4888 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4889 if (bp->gunzip_outlen & 0x3)
4890 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4891 " gunzip_outlen (%d) not aligned\n",
4892 bp->dev->name, bp->gunzip_outlen);
4893 bp->gunzip_outlen >>= 2;
4894
4895 zlib_inflateEnd(bp->strm);
4896
4897 if (rc == Z_STREAM_END)
4898 return 0;
4899
4900 return rc;
4901}
4902
4903/* nic load/unload */
4904
4905/*
34f80b04 4906 * General service functions
a2fbb9ea
ET
4907 */
4908
4909/* send a NIG loopback debug packet */
4910static void bnx2x_lb_pckt(struct bnx2x *bp)
4911{
a2fbb9ea 4912 u32 wb_write[3];
a2fbb9ea
ET
4913
4914 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4915 wb_write[0] = 0x55555555;
4916 wb_write[1] = 0x55555555;
34f80b04 4917 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4918 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4919
4920 /* NON-IP protocol */
a2fbb9ea
ET
4921 wb_write[0] = 0x09000000;
4922 wb_write[1] = 0x55555555;
34f80b04 4923 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4924 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4925}
4926
4927/* some of the internal memories
4928 * are not directly readable from the driver
4929 * to test them we send debug packets
4930 */
4931static int bnx2x_int_mem_test(struct bnx2x *bp)
4932{
4933 int factor;
4934 int count, i;
4935 u32 val = 0;
4936
ad8d3948 4937 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4938 factor = 120;
ad8d3948
EG
4939 else if (CHIP_REV_IS_EMUL(bp))
4940 factor = 200;
4941 else
a2fbb9ea 4942 factor = 1;
a2fbb9ea
ET
4943
4944 DP(NETIF_MSG_HW, "start part1\n");
4945
4946 /* Disable inputs of parser neighbor blocks */
4947 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4948 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4949 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4950 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4951
4952 /* Write 0 to parser credits for CFC search request */
4953 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4954
4955 /* send Ethernet packet */
4956 bnx2x_lb_pckt(bp);
4957
4958 /* TODO do i reset NIG statistic? */
4959 /* Wait until NIG register shows 1 packet of size 0x10 */
4960 count = 1000 * factor;
4961 while (count) {
34f80b04 4962
a2fbb9ea
ET
4963 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4964 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4965 if (val == 0x10)
4966 break;
4967
4968 msleep(10);
4969 count--;
4970 }
4971 if (val != 0x10) {
4972 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4973 return -1;
4974 }
4975
4976 /* Wait until PRS register shows 1 packet */
4977 count = 1000 * factor;
4978 while (count) {
4979 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4980 if (val == 1)
4981 break;
4982
4983 msleep(10);
4984 count--;
4985 }
4986 if (val != 0x1) {
4987 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4988 return -2;
4989 }
4990
4991 /* Reset and init BRB, PRS */
34f80b04 4992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4993 msleep(50);
34f80b04 4994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4995 msleep(50);
4996 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4997 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4998
4999 DP(NETIF_MSG_HW, "part2\n");
5000
5001 /* Disable inputs of parser neighbor blocks */
5002 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5003 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5004 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5005 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5006
5007 /* Write 0 to parser credits for CFC search request */
5008 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5009
5010 /* send 10 Ethernet packets */
5011 for (i = 0; i < 10; i++)
5012 bnx2x_lb_pckt(bp);
5013
5014 /* Wait until NIG register shows 10 + 1
5015 packets of size 11*0x10 = 0xb0 */
5016 count = 1000 * factor;
5017 while (count) {
34f80b04 5018
a2fbb9ea
ET
5019 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5020 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5021 if (val == 0xb0)
5022 break;
5023
5024 msleep(10);
5025 count--;
5026 }
5027 if (val != 0xb0) {
5028 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5029 return -3;
5030 }
5031
5032 /* Wait until PRS register shows 2 packets */
5033 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5034 if (val != 2)
5035 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5036
5037 /* Write 1 to parser credits for CFC search request */
5038 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5039
5040 /* Wait until PRS register shows 3 packets */
5041 msleep(10 * factor);
5042 /* Wait until NIG register shows 1 packet of size 0x10 */
5043 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5044 if (val != 3)
5045 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5046
5047 /* clear NIG EOP FIFO */
5048 for (i = 0; i < 11; i++)
5049 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5050 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5051 if (val != 1) {
5052 BNX2X_ERR("clear of NIG failed\n");
5053 return -4;
5054 }
5055
5056 /* Reset and init BRB, PRS, NIG */
5057 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5058 msleep(50);
5059 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5060 msleep(50);
5061 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5062 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5063#ifndef BCM_ISCSI
5064 /* set NIC mode */
5065 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5066#endif
5067
5068 /* Enable inputs of parser neighbor blocks */
5069 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5070 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5071 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5072 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5073
5074 DP(NETIF_MSG_HW, "done\n");
5075
5076 return 0; /* OK */
5077}
5078
5079static void enable_blocks_attention(struct bnx2x *bp)
5080{
5081 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5082 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5083 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5084 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5085 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5086 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5087 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5088 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5089 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5090/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5091/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5092 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5093 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5094 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5095/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5096/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5097 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5098 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5099 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5100 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5101/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5102/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5103 if (CHIP_REV_IS_FPGA(bp))
5104 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5105 else
5106 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5107 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5108 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5109 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5110/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5111/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5112 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5113 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5114/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5115 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5116}
5117
34f80b04
EG
5118
5119static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5120{
a2fbb9ea 5121 u32 val, i;
a2fbb9ea 5122
34f80b04 5123 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5124
34f80b04
EG
5125 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5126 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5127
34f80b04
EG
5128 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5129 if (CHIP_IS_E1H(bp))
5130 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5131
34f80b04
EG
5132 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5133 msleep(30);
5134 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5135
34f80b04
EG
5136 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5137 if (CHIP_IS_E1(bp)) {
5138 /* enable HW interrupt from PXP on USDM overflow
5139 bit 16 on INT_MASK_0 */
5140 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5141 }
a2fbb9ea 5142
34f80b04
EG
5143 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5144 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5145
5146#ifdef __BIG_ENDIAN
34f80b04
EG
5147 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5148 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5149 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5150 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5151 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5152 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5153
5154/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5155 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5156 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5157 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5158 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5159#endif
5160
34f80b04 5161 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5162#ifdef BCM_ISCSI
34f80b04
EG
5163 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5164 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5165 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5166#endif
5167
34f80b04
EG
5168 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5169 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5170
34f80b04
EG
5171 /* let the HW do it's magic ... */
5172 msleep(100);
5173 /* finish PXP init */
5174 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5175 if (val != 1) {
5176 BNX2X_ERR("PXP2 CFG failed\n");
5177 return -EBUSY;
5178 }
5179 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5180 if (val != 1) {
5181 BNX2X_ERR("PXP2 RD_INIT failed\n");
5182 return -EBUSY;
5183 }
a2fbb9ea 5184
34f80b04
EG
5185 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5186 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5187
34f80b04 5188 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5189
34f80b04
EG
5190 /* clean the DMAE memory */
5191 bp->dmae_ready = 1;
5192 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5193
34f80b04
EG
5194 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5195 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5196 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5197 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5198
34f80b04
EG
5199 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5200 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5201 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5202 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5203
5204 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5205 /* soft reset pulse */
5206 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5207 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5208
5209#ifdef BCM_ISCSI
34f80b04 5210 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5211#endif
a2fbb9ea 5212
34f80b04
EG
5213 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5214 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5215 if (!CHIP_REV_IS_SLOW(bp)) {
5216 /* enable hw interrupt from doorbell Q */
5217 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5218 }
a2fbb9ea 5219
34f80b04
EG
5220 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5221 if (CHIP_REV_IS_SLOW(bp)) {
5222 /* fix for emulation and FPGA for no pause */
5223 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5224 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5225 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5226 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5227 }
a2fbb9ea 5228
34f80b04 5229 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3196a88a
EG
5230 /* set NIC mode */
5231 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5232 if (CHIP_IS_E1H(bp))
5233 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5234
34f80b04
EG
5235 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5236 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5237 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5238 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5239
34f80b04
EG
5240 if (CHIP_IS_E1H(bp)) {
5241 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5242 STORM_INTMEM_SIZE_E1H/2);
5243 bnx2x_init_fill(bp,
5244 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5245 0, STORM_INTMEM_SIZE_E1H/2);
5246 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5247 STORM_INTMEM_SIZE_E1H/2);
5248 bnx2x_init_fill(bp,
5249 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5250 0, STORM_INTMEM_SIZE_E1H/2);
5251 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5252 STORM_INTMEM_SIZE_E1H/2);
5253 bnx2x_init_fill(bp,
5254 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5255 0, STORM_INTMEM_SIZE_E1H/2);
5256 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5257 STORM_INTMEM_SIZE_E1H/2);
5258 bnx2x_init_fill(bp,
5259 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5260 0, STORM_INTMEM_SIZE_E1H/2);
5261 } else { /* E1 */
ad8d3948
EG
5262 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5263 STORM_INTMEM_SIZE_E1);
5264 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5265 STORM_INTMEM_SIZE_E1);
5266 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5267 STORM_INTMEM_SIZE_E1);
5268 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5269 STORM_INTMEM_SIZE_E1);
34f80b04 5270 }
a2fbb9ea 5271
34f80b04
EG
5272 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5273 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5274 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5275 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5276
34f80b04
EG
5277 /* sync semi rtc */
5278 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5279 0x80000000);
5280 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5281 0x80000000);
a2fbb9ea 5282
34f80b04
EG
5283 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5284 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5285 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5286
34f80b04
EG
5287 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5288 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5289 REG_WR(bp, i, 0xc0cac01a);
5290 /* TODO: replace with something meaningful */
5291 }
5292 if (CHIP_IS_E1H(bp))
5293 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5294 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5295
34f80b04
EG
5296 if (sizeof(union cdu_context) != 1024)
5297 /* we currently assume that a context is 1024 bytes */
5298 printk(KERN_ALERT PFX "please adjust the size of"
5299 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5300
34f80b04
EG
5301 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5302 val = (4 << 24) + (0 << 12) + 1024;
5303 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5304 if (CHIP_IS_E1(bp)) {
5305 /* !!! fix pxp client crdit until excel update */
5306 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5307 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5308 }
a2fbb9ea 5309
34f80b04
EG
5310 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5311 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5312
34f80b04
EG
5313 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5314 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5315
34f80b04
EG
5316 /* PXPCS COMMON comes here */
5317 /* Reset PCIE errors for debug */
5318 REG_WR(bp, 0x2814, 0xffffffff);
5319 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5320
34f80b04
EG
5321 /* EMAC0 COMMON comes here */
5322 /* EMAC1 COMMON comes here */
5323 /* DBU COMMON comes here */
5324 /* DBG COMMON comes here */
5325
5326 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5327 if (CHIP_IS_E1H(bp)) {
5328 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5329 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5330 }
5331
5332 if (CHIP_REV_IS_SLOW(bp))
5333 msleep(200);
5334
5335 /* finish CFC init */
5336 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5337 if (val != 1) {
5338 BNX2X_ERR("CFC LL_INIT failed\n");
5339 return -EBUSY;
5340 }
5341 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5342 if (val != 1) {
5343 BNX2X_ERR("CFC AC_INIT failed\n");
5344 return -EBUSY;
5345 }
5346 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5347 if (val != 1) {
5348 BNX2X_ERR("CFC CAM_INIT failed\n");
5349 return -EBUSY;
5350 }
5351 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5352
34f80b04
EG
5353 /* read NIG statistic
5354 to see if this is our first up since powerup */
5355 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5356 val = *bnx2x_sp(bp, wb_data[0]);
5357
5358 /* do internal memory self test */
5359 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5360 BNX2X_ERR("internal mem self test failed\n");
5361 return -EBUSY;
5362 }
5363
5364 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5365 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
34f80b04
EG
5366 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5367 /* Fan failure is indicated by SPIO 5 */
5368 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5369 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5370
5371 /* set to active low mode */
5372 val = REG_RD(bp, MISC_REG_SPIO_INT);
5373 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5374 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5375 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5376
34f80b04
EG
5377 /* enable interrupt to signal the IGU */
5378 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5379 val |= (1 << MISC_REGISTERS_SPIO_5);
5380 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5381 break;
f1410647 5382
34f80b04
EG
5383 default:
5384 break;
5385 }
f1410647 5386
34f80b04
EG
5387 /* clear PXP2 attentions */
5388 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5389
34f80b04 5390 enable_blocks_attention(bp);
a2fbb9ea 5391
6bbca910
YR
5392 if (!BP_NOMCP(bp)) {
5393 bnx2x_acquire_phy_lock(bp);
5394 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5395 bnx2x_release_phy_lock(bp);
5396 } else
5397 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5398
34f80b04
EG
5399 return 0;
5400}
a2fbb9ea 5401
34f80b04
EG
5402static int bnx2x_init_port(struct bnx2x *bp)
5403{
5404 int port = BP_PORT(bp);
5405 u32 val;
a2fbb9ea 5406
34f80b04
EG
5407 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5408
5409 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5410
5411 /* Port PXP comes here */
5412 /* Port PXP2 comes here */
a2fbb9ea
ET
5413#ifdef BCM_ISCSI
5414 /* Port0 1
5415 * Port1 385 */
5416 i++;
5417 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5418 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5419 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5420 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5421
5422 /* Port0 2
5423 * Port1 386 */
5424 i++;
5425 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5426 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5427 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5428 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5429
5430 /* Port0 3
5431 * Port1 387 */
5432 i++;
5433 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5434 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5435 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5436 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5437#endif
34f80b04 5438 /* Port CMs come here */
a2fbb9ea
ET
5439
5440 /* Port QM comes here */
a2fbb9ea
ET
5441#ifdef BCM_ISCSI
5442 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5443 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5444
5445 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5446 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5447#endif
5448 /* Port DQ comes here */
5449 /* Port BRB1 comes here */
ad8d3948 5450 /* Port PRS comes here */
a2fbb9ea
ET
5451 /* Port TSDM comes here */
5452 /* Port CSDM comes here */
5453 /* Port USDM comes here */
5454 /* Port XSDM comes here */
34f80b04
EG
5455 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5456 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5457 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5458 port ? USEM_PORT1_END : USEM_PORT0_END);
5459 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5460 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5461 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5462 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5463 /* Port UPB comes here */
34f80b04
EG
5464 /* Port XPB comes here */
5465
5466 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5467 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5468
5469 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5470 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5471
5472 /* update threshold */
34f80b04 5473 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5474 /* update init credit */
34f80b04 5475 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5476
5477 /* probe changes */
34f80b04 5478 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5479 msleep(5);
34f80b04 5480 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5481
5482#ifdef BCM_ISCSI
5483 /* tell the searcher where the T2 table is */
5484 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5485
5486 wb_write[0] = U64_LO(bp->t2_mapping);
5487 wb_write[1] = U64_HI(bp->t2_mapping);
5488 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5489 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5490 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5491 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5492
5493 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5494 /* Port SRCH comes here */
5495#endif
5496 /* Port CDU comes here */
5497 /* Port CFC comes here */
34f80b04
EG
5498
5499 if (CHIP_IS_E1(bp)) {
5500 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5501 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5502 }
5503 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5504 port ? HC_PORT1_END : HC_PORT0_END);
5505
5506 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5507 MISC_AEU_PORT0_START,
34f80b04
EG
5508 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5509 /* init aeu_mask_attn_func_0/1:
5510 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5511 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5512 * bits 4-7 are used for "per vn group attention" */
5513 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5514 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5515
a2fbb9ea
ET
5516 /* Port PXPCS comes here */
5517 /* Port EMAC0 comes here */
5518 /* Port EMAC1 comes here */
5519 /* Port DBU comes here */
5520 /* Port DBG comes here */
34f80b04
EG
5521 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5522 port ? NIG_PORT1_END : NIG_PORT0_END);
5523
5524 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5525
5526 if (CHIP_IS_E1H(bp)) {
5527 u32 wsum;
5528 struct cmng_struct_per_port m_cmng_port;
5529 int vn;
5530
5531 /* 0x2 disable e1hov, 0x1 enable */
5532 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5533 (IS_E1HMF(bp) ? 0x1 : 0x2));
5534
5535 /* Init RATE SHAPING and FAIRNESS contexts.
5536 Initialize as if there is 10G link. */
5537 wsum = bnx2x_calc_vn_wsum(bp);
5538 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5539 if (IS_E1HMF(bp))
5540 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5541 bnx2x_init_vn_minmax(bp, 2*vn + port,
5542 wsum, 10000, &m_cmng_port);
5543 }
5544
a2fbb9ea
ET
5545 /* Port MCP comes here */
5546 /* Port DMAE comes here */
5547
34f80b04 5548 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5549 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
f1410647
ET
5550 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5551 /* add SPIO 5 to group 0 */
5552 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5553 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5554 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5555 break;
5556
5557 default:
5558 break;
5559 }
5560
c18487ee 5561 bnx2x__link_reset(bp);
a2fbb9ea 5562
34f80b04
EG
5563 return 0;
5564}
5565
5566#define ILT_PER_FUNC (768/2)
5567#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5568/* the phys address is shifted right 12 bits and has an added
5569 1=valid bit added to the 53rd bit
5570 then since this is a wide register(TM)
5571 we split it into two 32 bit writes
5572 */
5573#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5574#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5575#define PXP_ONE_ILT(x) (((x) << 10) | x)
5576#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5577
5578#define CNIC_ILT_LINES 0
5579
5580static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5581{
5582 int reg;
5583
5584 if (CHIP_IS_E1H(bp))
5585 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5586 else /* E1 */
5587 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5588
5589 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5590}
5591
5592static int bnx2x_init_func(struct bnx2x *bp)
5593{
5594 int port = BP_PORT(bp);
5595 int func = BP_FUNC(bp);
5596 int i;
5597
5598 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5599
5600 i = FUNC_ILT_BASE(func);
5601
5602 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5603 if (CHIP_IS_E1H(bp)) {
5604 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5605 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5606 } else /* E1 */
5607 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5608 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5609
5610
5611 if (CHIP_IS_E1H(bp)) {
5612 for (i = 0; i < 9; i++)
5613 bnx2x_init_block(bp,
5614 cm_start[func][i], cm_end[func][i]);
5615
5616 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5617 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5618 }
5619
5620 /* HC init per function */
5621 if (CHIP_IS_E1H(bp)) {
5622 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5623
5624 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5625 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5626 }
5627 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5628
5629 if (CHIP_IS_E1H(bp))
5630 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5631
c14423fe 5632 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5633 REG_WR(bp, 0x2114, 0xffffffff);
5634 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5635
34f80b04
EG
5636 return 0;
5637}
5638
5639static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5640{
5641 int i, rc = 0;
a2fbb9ea 5642
34f80b04
EG
5643 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5644 BP_FUNC(bp), load_code);
a2fbb9ea 5645
34f80b04
EG
5646 bp->dmae_ready = 0;
5647 mutex_init(&bp->dmae_mutex);
5648 bnx2x_gunzip_init(bp);
a2fbb9ea 5649
34f80b04
EG
5650 switch (load_code) {
5651 case FW_MSG_CODE_DRV_LOAD_COMMON:
5652 rc = bnx2x_init_common(bp);
5653 if (rc)
5654 goto init_hw_err;
5655 /* no break */
5656
5657 case FW_MSG_CODE_DRV_LOAD_PORT:
5658 bp->dmae_ready = 1;
5659 rc = bnx2x_init_port(bp);
5660 if (rc)
5661 goto init_hw_err;
5662 /* no break */
5663
5664 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5665 bp->dmae_ready = 1;
5666 rc = bnx2x_init_func(bp);
5667 if (rc)
5668 goto init_hw_err;
5669 break;
5670
5671 default:
5672 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5673 break;
5674 }
5675
5676 if (!BP_NOMCP(bp)) {
5677 int func = BP_FUNC(bp);
a2fbb9ea
ET
5678
5679 bp->fw_drv_pulse_wr_seq =
34f80b04 5680 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5681 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5682 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5683 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5684 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5685 } else
5686 bp->func_stx = 0;
a2fbb9ea 5687
34f80b04
EG
5688 /* this needs to be done before gunzip end */
5689 bnx2x_zero_def_sb(bp);
5690 for_each_queue(bp, i)
5691 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5692
5693init_hw_err:
5694 bnx2x_gunzip_end(bp);
5695
5696 return rc;
a2fbb9ea
ET
5697}
5698
c14423fe 5699/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5700static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5701{
34f80b04 5702 int func = BP_FUNC(bp);
f1410647
ET
5703 u32 seq = ++bp->fw_seq;
5704 u32 rc = 0;
19680c48
EG
5705 u32 cnt = 1;
5706 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5707
34f80b04 5708 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5709 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5710
19680c48
EG
5711 do {
5712 /* let the FW do it's magic ... */
5713 msleep(delay);
a2fbb9ea 5714
19680c48 5715 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5716
19680c48
EG
5717 /* Give the FW up to 2 second (200*10ms) */
5718 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5719
5720 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5721 cnt*delay, rc, seq);
a2fbb9ea
ET
5722
5723 /* is this a reply to our command? */
5724 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5725 rc &= FW_MSG_CODE_MASK;
f1410647 5726
a2fbb9ea
ET
5727 } else {
5728 /* FW BUG! */
5729 BNX2X_ERR("FW failed to respond!\n");
5730 bnx2x_fw_dump(bp);
5731 rc = 0;
5732 }
f1410647 5733
a2fbb9ea
ET
5734 return rc;
5735}
5736
5737static void bnx2x_free_mem(struct bnx2x *bp)
5738{
5739
5740#define BNX2X_PCI_FREE(x, y, size) \
5741 do { \
5742 if (x) { \
5743 pci_free_consistent(bp->pdev, size, x, y); \
5744 x = NULL; \
5745 y = 0; \
5746 } \
5747 } while (0)
5748
5749#define BNX2X_FREE(x) \
5750 do { \
5751 if (x) { \
5752 vfree(x); \
5753 x = NULL; \
5754 } \
5755 } while (0)
5756
5757 int i;
5758
5759 /* fastpath */
5760 for_each_queue(bp, i) {
5761
5762 /* Status blocks */
5763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5764 bnx2x_fp(bp, i, status_blk_mapping),
5765 sizeof(struct host_status_block) +
5766 sizeof(struct eth_tx_db_data));
5767
5768 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5769 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5770 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5771 bnx2x_fp(bp, i, tx_desc_mapping),
5772 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5773
5774 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5775 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5776 bnx2x_fp(bp, i, rx_desc_mapping),
5777 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5778
5779 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5780 bnx2x_fp(bp, i, rx_comp_mapping),
5781 sizeof(struct eth_fast_path_rx_cqe) *
5782 NUM_RCQ_BD);
a2fbb9ea 5783
7a9b2557 5784 /* SGE ring */
32626230 5785 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5786 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5787 bnx2x_fp(bp, i, rx_sge_mapping),
5788 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5789 }
a2fbb9ea
ET
5790 /* end of fastpath */
5791
5792 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5793 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5794
5795 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5796 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5797
5798#ifdef BCM_ISCSI
5799 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5800 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5801 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5802 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5803#endif
7a9b2557 5804 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5805
5806#undef BNX2X_PCI_FREE
5807#undef BNX2X_KFREE
5808}
5809
5810static int bnx2x_alloc_mem(struct bnx2x *bp)
5811{
5812
5813#define BNX2X_PCI_ALLOC(x, y, size) \
5814 do { \
5815 x = pci_alloc_consistent(bp->pdev, size, y); \
5816 if (x == NULL) \
5817 goto alloc_mem_err; \
5818 memset(x, 0, size); \
5819 } while (0)
5820
5821#define BNX2X_ALLOC(x, size) \
5822 do { \
5823 x = vmalloc(size); \
5824 if (x == NULL) \
5825 goto alloc_mem_err; \
5826 memset(x, 0, size); \
5827 } while (0)
5828
5829 int i;
5830
5831 /* fastpath */
a2fbb9ea
ET
5832 for_each_queue(bp, i) {
5833 bnx2x_fp(bp, i, bp) = bp;
5834
5835 /* Status blocks */
5836 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5837 &bnx2x_fp(bp, i, status_blk_mapping),
5838 sizeof(struct host_status_block) +
5839 sizeof(struct eth_tx_db_data));
5840
5841 bnx2x_fp(bp, i, hw_tx_prods) =
5842 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5843
5844 bnx2x_fp(bp, i, tx_prods_mapping) =
5845 bnx2x_fp(bp, i, status_blk_mapping) +
5846 sizeof(struct host_status_block);
5847
5848 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5849 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5850 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5851 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5852 &bnx2x_fp(bp, i, tx_desc_mapping),
5853 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5854
5855 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5856 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5857 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5858 &bnx2x_fp(bp, i, rx_desc_mapping),
5859 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5860
5861 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5862 &bnx2x_fp(bp, i, rx_comp_mapping),
5863 sizeof(struct eth_fast_path_rx_cqe) *
5864 NUM_RCQ_BD);
5865
7a9b2557
VZ
5866 /* SGE ring */
5867 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5868 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5869 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5870 &bnx2x_fp(bp, i, rx_sge_mapping),
5871 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea
ET
5872 }
5873 /* end of fastpath */
5874
5875 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5876 sizeof(struct host_def_status_block));
5877
5878 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5879 sizeof(struct bnx2x_slowpath));
5880
5881#ifdef BCM_ISCSI
5882 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5883
5884 /* Initialize T1 */
5885 for (i = 0; i < 64*1024; i += 64) {
5886 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5887 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5888 }
5889
5890 /* allocate searcher T2 table
5891 we allocate 1/4 of alloc num for T2
5892 (which is not entered into the ILT) */
5893 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5894
5895 /* Initialize T2 */
5896 for (i = 0; i < 16*1024; i += 64)
5897 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5898
c14423fe 5899 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5900 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5901
5902 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5903 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5904
5905 /* QM queues (128*MAX_CONN) */
5906 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5907#endif
5908
5909 /* Slow path ring */
5910 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5911
5912 return 0;
5913
5914alloc_mem_err:
5915 bnx2x_free_mem(bp);
5916 return -ENOMEM;
5917
5918#undef BNX2X_PCI_ALLOC
5919#undef BNX2X_ALLOC
5920}
5921
5922static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5923{
5924 int i;
5925
5926 for_each_queue(bp, i) {
5927 struct bnx2x_fastpath *fp = &bp->fp[i];
5928
5929 u16 bd_cons = fp->tx_bd_cons;
5930 u16 sw_prod = fp->tx_pkt_prod;
5931 u16 sw_cons = fp->tx_pkt_cons;
5932
a2fbb9ea
ET
5933 while (sw_cons != sw_prod) {
5934 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5935 sw_cons++;
5936 }
5937 }
5938}
5939
5940static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5941{
5942 int i, j;
5943
5944 for_each_queue(bp, j) {
5945 struct bnx2x_fastpath *fp = &bp->fp[j];
5946
a2fbb9ea
ET
5947 for (i = 0; i < NUM_RX_BD; i++) {
5948 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5949 struct sk_buff *skb = rx_buf->skb;
5950
5951 if (skb == NULL)
5952 continue;
5953
5954 pci_unmap_single(bp->pdev,
5955 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5956 bp->rx_buf_size,
a2fbb9ea
ET
5957 PCI_DMA_FROMDEVICE);
5958
5959 rx_buf->skb = NULL;
5960 dev_kfree_skb(skb);
5961 }
7a9b2557 5962 if (!fp->disable_tpa)
32626230
EG
5963 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5964 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5965 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
5966 }
5967}
5968
5969static void bnx2x_free_skbs(struct bnx2x *bp)
5970{
5971 bnx2x_free_tx_skbs(bp);
5972 bnx2x_free_rx_skbs(bp);
5973}
5974
5975static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5976{
34f80b04 5977 int i, offset = 1;
a2fbb9ea
ET
5978
5979 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5980 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
5981 bp->msix_table[0].vector);
5982
5983 for_each_queue(bp, i) {
c14423fe 5984 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5985 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5986 bnx2x_fp(bp, i, state));
5987
228241eb
ET
5988 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5989 BNX2X_ERR("IRQ of fp #%d being freed while "
5990 "state != closed\n", i);
a2fbb9ea 5991
34f80b04 5992 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5993 }
a2fbb9ea
ET
5994}
5995
5996static void bnx2x_free_irq(struct bnx2x *bp)
5997{
a2fbb9ea 5998 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
5999 bnx2x_free_msix_irqs(bp);
6000 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6001 bp->flags &= ~USING_MSIX_FLAG;
6002
6003 } else
6004 free_irq(bp->pdev->irq, bp->dev);
6005}
6006
6007static int bnx2x_enable_msix(struct bnx2x *bp)
6008{
34f80b04 6009 int i, rc, offset;
a2fbb9ea
ET
6010
6011 bp->msix_table[0].entry = 0;
34f80b04
EG
6012 offset = 1;
6013 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6014
34f80b04
EG
6015 for_each_queue(bp, i) {
6016 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6017
34f80b04
EG
6018 bp->msix_table[i + offset].entry = igu_vec;
6019 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6020 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6021 }
6022
34f80b04
EG
6023 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6024 bp->num_queues + offset);
6025 if (rc) {
6026 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6027 return -1;
6028 }
a2fbb9ea
ET
6029 bp->flags |= USING_MSIX_FLAG;
6030
6031 return 0;
a2fbb9ea
ET
6032}
6033
a2fbb9ea
ET
6034static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6035{
34f80b04 6036 int i, rc, offset = 1;
a2fbb9ea 6037
a2fbb9ea
ET
6038 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6039 bp->dev->name, bp->dev);
a2fbb9ea
ET
6040 if (rc) {
6041 BNX2X_ERR("request sp irq failed\n");
6042 return -EBUSY;
6043 }
6044
6045 for_each_queue(bp, i) {
34f80b04 6046 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6047 bnx2x_msix_fp_int, 0,
6048 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6049 if (rc) {
3196a88a
EG
6050 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6051 i + offset, -rc);
a2fbb9ea
ET
6052 bnx2x_free_msix_irqs(bp);
6053 return -EBUSY;
6054 }
6055
6056 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6057 }
6058
6059 return 0;
a2fbb9ea
ET
6060}
6061
6062static int bnx2x_req_irq(struct bnx2x *bp)
6063{
34f80b04 6064 int rc;
a2fbb9ea 6065
34f80b04
EG
6066 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6067 bp->dev->name, bp->dev);
a2fbb9ea
ET
6068 if (!rc)
6069 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6070
6071 return rc;
a2fbb9ea
ET
6072}
6073
65abd74d
YG
6074static void bnx2x_napi_enable(struct bnx2x *bp)
6075{
6076 int i;
6077
6078 for_each_queue(bp, i)
6079 napi_enable(&bnx2x_fp(bp, i, napi));
6080}
6081
6082static void bnx2x_napi_disable(struct bnx2x *bp)
6083{
6084 int i;
6085
6086 for_each_queue(bp, i)
6087 napi_disable(&bnx2x_fp(bp, i, napi));
6088}
6089
6090static void bnx2x_netif_start(struct bnx2x *bp)
6091{
6092 if (atomic_dec_and_test(&bp->intr_sem)) {
6093 if (netif_running(bp->dev)) {
6094 if (bp->state == BNX2X_STATE_OPEN)
6095 netif_wake_queue(bp->dev);
6096 bnx2x_napi_enable(bp);
6097 bnx2x_int_enable(bp);
6098 }
6099 }
6100}
6101
f8ef6e44 6102static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6103{
f8ef6e44 6104 bnx2x_int_disable_sync(bp, disable_hw);
65abd74d
YG
6105 if (netif_running(bp->dev)) {
6106 bnx2x_napi_disable(bp);
6107 netif_tx_disable(bp->dev);
6108 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6109 }
6110}
6111
a2fbb9ea
ET
6112/*
6113 * Init service functions
6114 */
6115
3101c2bc 6116static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6117{
6118 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6119 int port = BP_PORT(bp);
a2fbb9ea
ET
6120
6121 /* CAM allocation
6122 * unicasts 0-31:port0 32-63:port1
6123 * multicast 64-127:port0 128-191:port1
6124 */
6125 config->hdr.length_6b = 2;
34f80b04
EG
6126 config->hdr.offset = port ? 31 : 0;
6127 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6128 config->hdr.reserved1 = 0;
6129
6130 /* primary MAC */
6131 config->config_table[0].cam_entry.msb_mac_addr =
6132 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6133 config->config_table[0].cam_entry.middle_mac_addr =
6134 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6135 config->config_table[0].cam_entry.lsb_mac_addr =
6136 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6137 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6138 if (set)
6139 config->config_table[0].target_table_entry.flags = 0;
6140 else
6141 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6142 config->config_table[0].target_table_entry.client_id = 0;
6143 config->config_table[0].target_table_entry.vlan_id = 0;
6144
3101c2bc
YG
6145 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6146 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6147 config->config_table[0].cam_entry.msb_mac_addr,
6148 config->config_table[0].cam_entry.middle_mac_addr,
6149 config->config_table[0].cam_entry.lsb_mac_addr);
6150
6151 /* broadcast */
6152 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6153 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6154 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6155 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6156 if (set)
6157 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6158 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6159 else
6160 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6161 config->config_table[1].target_table_entry.client_id = 0;
6162 config->config_table[1].target_table_entry.vlan_id = 0;
6163
6164 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6165 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6166 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6167}
6168
3101c2bc 6169static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6170{
6171 struct mac_configuration_cmd_e1h *config =
6172 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6173
3101c2bc 6174 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6175 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6176 return;
6177 }
6178
6179 /* CAM allocation for E1H
6180 * unicasts: by func number
6181 * multicast: 20+FUNC*20, 20 each
6182 */
6183 config->hdr.length_6b = 1;
6184 config->hdr.offset = BP_FUNC(bp);
6185 config->hdr.client_id = BP_CL_ID(bp);
6186 config->hdr.reserved1 = 0;
6187
6188 /* primary MAC */
6189 config->config_table[0].msb_mac_addr =
6190 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6191 config->config_table[0].middle_mac_addr =
6192 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6193 config->config_table[0].lsb_mac_addr =
6194 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6195 config->config_table[0].client_id = BP_L_ID(bp);
6196 config->config_table[0].vlan_id = 0;
6197 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6198 if (set)
6199 config->config_table[0].flags = BP_PORT(bp);
6200 else
6201 config->config_table[0].flags =
6202 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6203
3101c2bc
YG
6204 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6205 (set ? "setting" : "clearing"),
34f80b04
EG
6206 config->config_table[0].msb_mac_addr,
6207 config->config_table[0].middle_mac_addr,
6208 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6209
6210 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6211 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6212 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6213}
6214
a2fbb9ea
ET
6215static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6216 int *state_p, int poll)
6217{
6218 /* can take a while if any port is running */
34f80b04 6219 int cnt = 500;
a2fbb9ea 6220
c14423fe
ET
6221 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6222 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6223
6224 might_sleep();
34f80b04 6225 while (cnt--) {
a2fbb9ea
ET
6226 if (poll) {
6227 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6228 /* if index is different from 0
6229 * the reply for some commands will
3101c2bc 6230 * be on the non default queue
a2fbb9ea
ET
6231 */
6232 if (idx)
6233 bnx2x_rx_int(&bp->fp[idx], 10);
6234 }
a2fbb9ea 6235
3101c2bc 6236 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6237 if (*state_p == state)
a2fbb9ea
ET
6238 return 0;
6239
a2fbb9ea 6240 msleep(1);
a2fbb9ea
ET
6241 }
6242
a2fbb9ea 6243 /* timeout! */
49d66772
ET
6244 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6245 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6246#ifdef BNX2X_STOP_ON_ERROR
6247 bnx2x_panic();
6248#endif
a2fbb9ea 6249
49d66772 6250 return -EBUSY;
a2fbb9ea
ET
6251}
6252
6253static int bnx2x_setup_leading(struct bnx2x *bp)
6254{
34f80b04 6255 int rc;
a2fbb9ea 6256
c14423fe 6257 /* reset IGU state */
34f80b04 6258 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6259
6260 /* SETUP ramrod */
6261 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6262
34f80b04
EG
6263 /* Wait for completion */
6264 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6265
34f80b04 6266 return rc;
a2fbb9ea
ET
6267}
6268
6269static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6270{
a2fbb9ea 6271 /* reset IGU state */
34f80b04 6272 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6273
228241eb 6274 /* SETUP ramrod */
a2fbb9ea
ET
6275 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6276 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6277
6278 /* Wait for completion */
6279 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6280 &(bp->fp[index].state), 0);
a2fbb9ea
ET
6281}
6282
a2fbb9ea
ET
6283static int bnx2x_poll(struct napi_struct *napi, int budget);
6284static void bnx2x_set_rx_mode(struct net_device *dev);
6285
34f80b04
EG
6286/* must be called with rtnl_lock */
6287static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6288{
228241eb 6289 u32 load_code;
34f80b04 6290 int i, rc;
34f80b04
EG
6291#ifdef BNX2X_STOP_ON_ERROR
6292 if (unlikely(bp->panic))
6293 return -EPERM;
6294#endif
a2fbb9ea
ET
6295
6296 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6297
34f80b04
EG
6298 /* Send LOAD_REQUEST command to MCP
6299 Returns the type of LOAD command:
6300 if it is the first port to be initialized
6301 common blocks should be initialized, otherwise - not
a2fbb9ea 6302 */
34f80b04 6303 if (!BP_NOMCP(bp)) {
228241eb
ET
6304 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6305 if (!load_code) {
da5a662a 6306 BNX2X_ERR("MCP response failure, aborting\n");
228241eb
ET
6307 return -EBUSY;
6308 }
34f80b04 6309 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6310 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6311
a2fbb9ea 6312 } else {
da5a662a
VZ
6313 int port = BP_PORT(bp);
6314
34f80b04
EG
6315 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6316 load_count[0], load_count[1], load_count[2]);
6317 load_count[0]++;
da5a662a 6318 load_count[1 + port]++;
34f80b04
EG
6319 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6320 load_count[0], load_count[1], load_count[2]);
6321 if (load_count[0] == 1)
6322 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6323 else if (load_count[1 + port] == 1)
34f80b04
EG
6324 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6325 else
6326 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
6327 }
6328
34f80b04
EG
6329 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6330 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6331 bp->port.pmf = 1;
6332 else
6333 bp->port.pmf = 0;
6334 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6335
6336 /* if we can't use MSI-X we only need one fp,
6337 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
6338 * and fallback to inta with one fp
6339 */
34f80b04
EG
6340 if (use_inta) {
6341 bp->num_queues = 1;
6342
6343 } else {
6344 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6345 /* user requested number */
6346 bp->num_queues = use_multi;
6347
6348 else if (use_multi)
6349 bp->num_queues = min_t(u32, num_online_cpus(),
6350 BP_MAX_QUEUES(bp));
6351 else
a2fbb9ea 6352 bp->num_queues = 1;
34f80b04
EG
6353
6354 if (bnx2x_enable_msix(bp)) {
6355 /* failed to enable MSI-X */
6356 bp->num_queues = 1;
6357 if (use_multi)
6358 BNX2X_ERR("Multi requested but failed"
6359 " to enable MSI-X\n");
a2fbb9ea
ET
6360 }
6361 }
34f80b04
EG
6362 DP(NETIF_MSG_IFUP,
6363 "set number of queues to %d\n", bp->num_queues);
c14423fe 6364
a2fbb9ea
ET
6365 if (bnx2x_alloc_mem(bp))
6366 return -ENOMEM;
6367
7a9b2557
VZ
6368 for_each_queue(bp, i)
6369 bnx2x_fp(bp, i, disable_tpa) =
6370 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6371
34f80b04
EG
6372 if (bp->flags & USING_MSIX_FLAG) {
6373 rc = bnx2x_req_msix_irqs(bp);
6374 if (rc) {
6375 pci_disable_msix(bp->pdev);
6376 goto load_error;
6377 }
6378 } else {
6379 bnx2x_ack_int(bp);
6380 rc = bnx2x_req_irq(bp);
6381 if (rc) {
6382 BNX2X_ERR("IRQ request failed, aborting\n");
6383 goto load_error;
a2fbb9ea
ET
6384 }
6385 }
6386
6387 for_each_queue(bp, i)
6388 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6389 bnx2x_poll, 128);
6390
a2fbb9ea 6391 /* Initialize HW */
34f80b04
EG
6392 rc = bnx2x_init_hw(bp, load_code);
6393 if (rc) {
a2fbb9ea 6394 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6395 goto load_int_disable;
a2fbb9ea
ET
6396 }
6397
a2fbb9ea 6398 /* Setup NIC internals and enable interrupts */
471de716 6399 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6400
6401 /* Send LOAD_DONE command to MCP */
34f80b04 6402 if (!BP_NOMCP(bp)) {
228241eb
ET
6403 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6404 if (!load_code) {
da5a662a 6405 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6406 rc = -EBUSY;
d1014634 6407 goto load_rings_free;
a2fbb9ea
ET
6408 }
6409 }
6410
bb2a0f7a
YG
6411 bnx2x_stats_init(bp);
6412
a2fbb9ea
ET
6413 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6414
6415 /* Enable Rx interrupt handling before sending the ramrod
6416 as it's completed on Rx FP queue */
65abd74d 6417 bnx2x_napi_enable(bp);
a2fbb9ea 6418
da5a662a
VZ
6419 /* Enable interrupt handling */
6420 atomic_set(&bp->intr_sem, 0);
6421
34f80b04
EG
6422 rc = bnx2x_setup_leading(bp);
6423 if (rc) {
da5a662a 6424 BNX2X_ERR("Setup leading failed!\n");
d1014634 6425 goto load_netif_stop;
34f80b04 6426 }
a2fbb9ea 6427
34f80b04
EG
6428 if (CHIP_IS_E1H(bp))
6429 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6430 BNX2X_ERR("!!! mf_cfg function disabled\n");
6431 bp->state = BNX2X_STATE_DISABLED;
6432 }
a2fbb9ea 6433
34f80b04
EG
6434 if (bp->state == BNX2X_STATE_OPEN)
6435 for_each_nondefault_queue(bp, i) {
6436 rc = bnx2x_setup_multi(bp, i);
6437 if (rc)
d1014634 6438 goto load_netif_stop;
34f80b04 6439 }
a2fbb9ea 6440
34f80b04 6441 if (CHIP_IS_E1(bp))
3101c2bc 6442 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6443 else
3101c2bc 6444 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6445
6446 if (bp->port.pmf)
6447 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6448
6449 /* Start fast path */
34f80b04
EG
6450 switch (load_mode) {
6451 case LOAD_NORMAL:
6452 /* Tx queue should be only reenabled */
6453 netif_wake_queue(bp->dev);
6454 bnx2x_set_rx_mode(bp->dev);
6455 break;
6456
6457 case LOAD_OPEN:
a2fbb9ea 6458 netif_start_queue(bp->dev);
34f80b04 6459 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
6460 if (bp->flags & USING_MSIX_FLAG)
6461 printk(KERN_INFO PFX "%s: using MSI-X\n",
6462 bp->dev->name);
34f80b04 6463 break;
a2fbb9ea 6464
34f80b04 6465 case LOAD_DIAG:
a2fbb9ea 6466 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6467 bp->state = BNX2X_STATE_DIAG;
6468 break;
6469
6470 default:
6471 break;
a2fbb9ea
ET
6472 }
6473
34f80b04
EG
6474 if (!bp->port.pmf)
6475 bnx2x__link_status_update(bp);
6476
a2fbb9ea
ET
6477 /* start the timer */
6478 mod_timer(&bp->timer, jiffies + bp->current_interval);
6479
34f80b04 6480
a2fbb9ea
ET
6481 return 0;
6482
d1014634 6483load_netif_stop:
65abd74d 6484 bnx2x_napi_disable(bp);
d1014634 6485load_rings_free:
7a9b2557
VZ
6486 /* Free SKBs, SGEs, TPA pool and driver internals */
6487 bnx2x_free_skbs(bp);
6488 for_each_queue(bp, i)
3196a88a 6489 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d1014634 6490load_int_disable:
f8ef6e44 6491 bnx2x_int_disable_sync(bp, 1);
d1014634
YG
6492 /* Release IRQs */
6493 bnx2x_free_irq(bp);
228241eb 6494load_error:
a2fbb9ea 6495 bnx2x_free_mem(bp);
9a035440 6496 bp->port.pmf = 0;
a2fbb9ea
ET
6497
6498 /* TBD we really need to reset the chip
6499 if we want to recover from this */
34f80b04 6500 return rc;
a2fbb9ea
ET
6501}
6502
6503static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6504{
a2fbb9ea
ET
6505 int rc;
6506
c14423fe 6507 /* halt the connection */
a2fbb9ea 6508 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6509 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6510
34f80b04 6511 /* Wait for completion */
a2fbb9ea 6512 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6513 &(bp->fp[index].state), 1);
c14423fe 6514 if (rc) /* timeout */
a2fbb9ea
ET
6515 return rc;
6516
6517 /* delete cfc entry */
6518 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6519
34f80b04
EG
6520 /* Wait for completion */
6521 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6522 &(bp->fp[index].state), 1);
6523 return rc;
a2fbb9ea
ET
6524}
6525
da5a662a 6526static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6527{
49d66772 6528 u16 dsb_sp_prod_idx;
c14423fe 6529 /* if the other port is handling traffic,
a2fbb9ea 6530 this can take a lot of time */
34f80b04
EG
6531 int cnt = 500;
6532 int rc;
a2fbb9ea
ET
6533
6534 might_sleep();
6535
6536 /* Send HALT ramrod */
6537 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6538 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6539
34f80b04
EG
6540 /* Wait for completion */
6541 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6542 &(bp->fp[0].state), 1);
6543 if (rc) /* timeout */
da5a662a 6544 return rc;
a2fbb9ea 6545
49d66772 6546 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6547
228241eb 6548 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6549 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6550
49d66772 6551 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6552 we are going to reset the chip anyway
6553 so there is not much to do if this times out
6554 */
34f80b04 6555 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6556 if (!cnt) {
6557 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6558 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6559 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6560#ifdef BNX2X_STOP_ON_ERROR
6561 bnx2x_panic();
da5a662a
VZ
6562#else
6563 rc = -EBUSY;
34f80b04
EG
6564#endif
6565 break;
6566 }
6567 cnt--;
da5a662a 6568 msleep(1);
49d66772
ET
6569 }
6570 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6571 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6572
6573 return rc;
a2fbb9ea
ET
6574}
6575
34f80b04
EG
6576static void bnx2x_reset_func(struct bnx2x *bp)
6577{
6578 int port = BP_PORT(bp);
6579 int func = BP_FUNC(bp);
6580 int base, i;
6581
6582 /* Configure IGU */
6583 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6584 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6585
6586 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6587
6588 /* Clear ILT */
6589 base = FUNC_ILT_BASE(func);
6590 for (i = base; i < base + ILT_PER_FUNC; i++)
6591 bnx2x_ilt_wr(bp, i, 0);
6592}
6593
6594static void bnx2x_reset_port(struct bnx2x *bp)
6595{
6596 int port = BP_PORT(bp);
6597 u32 val;
6598
6599 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6600
6601 /* Do not rcv packets to BRB */
6602 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6603 /* Do not direct rcv packets that are not for MCP to the BRB */
6604 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6605 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6606
6607 /* Configure AEU */
6608 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6609
6610 msleep(100);
6611 /* Check for BRB port occupancy */
6612 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6613 if (val)
6614 DP(NETIF_MSG_IFDOWN,
33471629 6615 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6616
6617 /* TODO: Close Doorbell port? */
6618}
6619
6620static void bnx2x_reset_common(struct bnx2x *bp)
6621{
6622 /* reset_common */
6623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6624 0xd3ffff7f);
6625 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6626}
6627
6628static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6629{
6630 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6631 BP_FUNC(bp), reset_code);
6632
6633 switch (reset_code) {
6634 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6635 bnx2x_reset_port(bp);
6636 bnx2x_reset_func(bp);
6637 bnx2x_reset_common(bp);
6638 break;
6639
6640 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6641 bnx2x_reset_port(bp);
6642 bnx2x_reset_func(bp);
6643 break;
6644
6645 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6646 bnx2x_reset_func(bp);
6647 break;
49d66772 6648
34f80b04
EG
6649 default:
6650 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6651 break;
6652 }
6653}
6654
33471629 6655/* must be called with rtnl_lock */
34f80b04 6656static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6657{
da5a662a 6658 int port = BP_PORT(bp);
a2fbb9ea 6659 u32 reset_code = 0;
da5a662a 6660 int i, cnt, rc;
a2fbb9ea
ET
6661
6662 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6663
228241eb
ET
6664 bp->rx_mode = BNX2X_RX_MODE_NONE;
6665 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6666
f8ef6e44 6667 bnx2x_netif_stop(bp, 1);
65abd74d
YG
6668 if (!netif_running(bp->dev))
6669 bnx2x_napi_disable(bp);
34f80b04
EG
6670 del_timer_sync(&bp->timer);
6671 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6672 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6673 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6674
da5a662a 6675 /* Wait until tx fast path tasks complete */
228241eb
ET
6676 for_each_queue(bp, i) {
6677 struct bnx2x_fastpath *fp = &bp->fp[i];
6678
34f80b04
EG
6679 cnt = 1000;
6680 smp_rmb();
da5a662a
VZ
6681 while (BNX2X_HAS_TX_WORK(fp)) {
6682
65abd74d 6683 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6684 if (!cnt) {
6685 BNX2X_ERR("timeout waiting for queue[%d]\n",
6686 i);
6687#ifdef BNX2X_STOP_ON_ERROR
6688 bnx2x_panic();
6689 return -EBUSY;
6690#else
6691 break;
6692#endif
6693 }
6694 cnt--;
da5a662a 6695 msleep(1);
34f80b04
EG
6696 smp_rmb();
6697 }
228241eb 6698 }
da5a662a
VZ
6699 /* Give HW time to discard old tx messages */
6700 msleep(1);
a2fbb9ea 6701
34f80b04
EG
6702 /* Release IRQs */
6703 bnx2x_free_irq(bp);
6704
3101c2bc
YG
6705 if (CHIP_IS_E1(bp)) {
6706 struct mac_configuration_cmd *config =
6707 bnx2x_sp(bp, mcast_config);
6708
6709 bnx2x_set_mac_addr_e1(bp, 0);
6710
6711 for (i = 0; i < config->hdr.length_6b; i++)
6712 CAM_INVALIDATE(config->config_table[i]);
6713
6714 config->hdr.length_6b = i;
6715 if (CHIP_REV_IS_SLOW(bp))
6716 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6717 else
6718 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6719 config->hdr.client_id = BP_CL_ID(bp);
6720 config->hdr.reserved1 = 0;
6721
6722 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6723 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6724 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6725
6726 } else { /* E1H */
65abd74d
YG
6727 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6728
3101c2bc
YG
6729 bnx2x_set_mac_addr_e1h(bp, 0);
6730
6731 for (i = 0; i < MC_HASH_SIZE; i++)
6732 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6733 }
6734
65abd74d
YG
6735 if (unload_mode == UNLOAD_NORMAL)
6736 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6737
6738 else if (bp->flags & NO_WOL_FLAG) {
6739 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6740 if (CHIP_IS_E1H(bp))
6741 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6742
6743 } else if (bp->wol) {
6744 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6745 u8 *mac_addr = bp->dev->dev_addr;
6746 u32 val;
6747 /* The mac address is written to entries 1-4 to
6748 preserve entry 0 which is used by the PMF */
6749 u8 entry = (BP_E1HVN(bp) + 1)*8;
6750
6751 val = (mac_addr[0] << 8) | mac_addr[1];
6752 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6753
6754 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6755 (mac_addr[4] << 8) | mac_addr[5];
6756 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6757
6758 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6759
6760 } else
6761 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6762
34f80b04
EG
6763 /* Close multi and leading connections
6764 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6765 for_each_nondefault_queue(bp, i)
6766 if (bnx2x_stop_multi(bp, i))
228241eb 6767 goto unload_error;
a2fbb9ea 6768
da5a662a
VZ
6769 rc = bnx2x_stop_leading(bp);
6770 if (rc) {
34f80b04 6771 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6772#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6773 return -EBUSY;
da5a662a
VZ
6774#else
6775 goto unload_error;
34f80b04 6776#endif
228241eb
ET
6777 }
6778
6779unload_error:
34f80b04 6780 if (!BP_NOMCP(bp))
228241eb 6781 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6782 else {
6783 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6784 load_count[0], load_count[1], load_count[2]);
6785 load_count[0]--;
da5a662a 6786 load_count[1 + port]--;
34f80b04
EG
6787 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6788 load_count[0], load_count[1], load_count[2]);
6789 if (load_count[0] == 0)
6790 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6791 else if (load_count[1 + port] == 0)
34f80b04
EG
6792 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6793 else
6794 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6795 }
a2fbb9ea 6796
34f80b04
EG
6797 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6798 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6799 bnx2x__link_reset(bp);
a2fbb9ea
ET
6800
6801 /* Reset the chip */
228241eb 6802 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6803
6804 /* Report UNLOAD_DONE to MCP */
34f80b04 6805 if (!BP_NOMCP(bp))
a2fbb9ea 6806 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6807 bp->port.pmf = 0;
a2fbb9ea 6808
7a9b2557 6809 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6810 bnx2x_free_skbs(bp);
7a9b2557 6811 for_each_queue(bp, i)
3196a88a 6812 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6813 bnx2x_free_mem(bp);
6814
6815 bp->state = BNX2X_STATE_CLOSED;
228241eb 6816
a2fbb9ea
ET
6817 netif_carrier_off(bp->dev);
6818
6819 return 0;
6820}
6821
34f80b04
EG
6822static void bnx2x_reset_task(struct work_struct *work)
6823{
6824 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6825
6826#ifdef BNX2X_STOP_ON_ERROR
6827 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6828 " so reset not done to allow debug dump,\n"
6829 KERN_ERR " you will need to reboot when done\n");
6830 return;
6831#endif
6832
6833 rtnl_lock();
6834
6835 if (!netif_running(bp->dev))
6836 goto reset_task_exit;
6837
6838 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6839 bnx2x_nic_load(bp, LOAD_NORMAL);
6840
6841reset_task_exit:
6842 rtnl_unlock();
6843}
6844
a2fbb9ea
ET
6845/* end of nic load/unload */
6846
6847/* ethtool_ops */
6848
6849/*
6850 * Init service functions
6851 */
6852
34f80b04
EG
6853static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6854{
6855 u32 val;
6856
6857 /* Check if there is any driver already loaded */
6858 val = REG_RD(bp, MISC_REG_UNPREPARED);
6859 if (val == 0x1) {
6860 /* Check if it is the UNDI driver
6861 * UNDI driver initializes CID offset for normal bell to 0x7
6862 */
4a37fb66 6863 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 6864 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
76b190c5
EG
6865 if (val == 0x7)
6866 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6867 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6868
34f80b04
EG
6869 if (val == 0x7) {
6870 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6871 /* save our func */
34f80b04 6872 int func = BP_FUNC(bp);
da5a662a
VZ
6873 u32 swap_en;
6874 u32 swap_val;
34f80b04
EG
6875
6876 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6877
6878 /* try unload UNDI on port 0 */
6879 bp->func = 0;
da5a662a
VZ
6880 bp->fw_seq =
6881 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6882 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6883 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6884
6885 /* if UNDI is loaded on the other port */
6886 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6887
da5a662a
VZ
6888 /* send "DONE" for previous unload */
6889 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6890
6891 /* unload UNDI on port 1 */
34f80b04 6892 bp->func = 1;
da5a662a
VZ
6893 bp->fw_seq =
6894 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6895 DRV_MSG_SEQ_NUMBER_MASK);
6896 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6897
6898 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6899 }
6900
da5a662a
VZ
6901 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6902 HC_REG_CONFIG_0), 0x1000);
6903
6904 /* close input traffic and wait for it */
6905 /* Do not rcv packets to BRB */
6906 REG_WR(bp,
6907 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6908 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6909 /* Do not direct rcv packets that are not for MCP to
6910 * the BRB */
6911 REG_WR(bp,
6912 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6913 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6914 /* clear AEU */
6915 REG_WR(bp,
6916 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6917 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6918 msleep(10);
6919
6920 /* save NIG port swap info */
6921 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6922 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6923 /* reset device */
6924 REG_WR(bp,
6925 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6926 0xd3ffffff);
34f80b04
EG
6927 REG_WR(bp,
6928 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6929 0x1403);
da5a662a
VZ
6930 /* take the NIG out of reset and restore swap values */
6931 REG_WR(bp,
6932 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6933 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6934 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6935 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6936
6937 /* send unload done to the MCP */
6938 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6939
6940 /* restore our func and fw_seq */
6941 bp->func = func;
6942 bp->fw_seq =
6943 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6944 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
6945 }
6946 }
6947}
6948
6949static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6950{
6951 u32 val, val2, val3, val4, id;
72ce58c3 6952 u16 pmc;
34f80b04
EG
6953
6954 /* Get the chip revision id and number. */
6955 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6956 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6957 id = ((val & 0xffff) << 16);
6958 val = REG_RD(bp, MISC_REG_CHIP_REV);
6959 id |= ((val & 0xf) << 12);
6960 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6961 id |= ((val & 0xff) << 4);
6962 REG_RD(bp, MISC_REG_BOND_ID);
6963 id |= (val & 0xf);
6964 bp->common.chip_id = id;
6965 bp->link_params.chip_id = bp->common.chip_id;
6966 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6967
6968 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6969 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6970 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6971 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6972 bp->common.flash_size, bp->common.flash_size);
6973
6974 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6975 bp->link_params.shmem_base = bp->common.shmem_base;
6976 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6977
6978 if (!bp->common.shmem_base ||
6979 (bp->common.shmem_base < 0xA0000) ||
6980 (bp->common.shmem_base >= 0xC0000)) {
6981 BNX2X_DEV_INFO("MCP not active\n");
6982 bp->flags |= NO_MCP_FLAG;
6983 return;
6984 }
6985
6986 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6987 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6988 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6989 BNX2X_ERR("BAD MCP validity signature\n");
6990
6991 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6992 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6993
6994 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6995 bp->common.hw_config, bp->common.board);
6996
6997 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6998 SHARED_HW_CFG_LED_MODE_MASK) >>
6999 SHARED_HW_CFG_LED_MODE_SHIFT);
7000
7001 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7002 bp->common.bc_ver = val;
7003 BNX2X_DEV_INFO("bc_ver %X\n", val);
7004 if (val < BNX2X_BC_VER) {
7005 /* for now only warn
7006 * later we might need to enforce this */
7007 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7008 " please upgrade BC\n", BNX2X_BC_VER, val);
7009 }
72ce58c3
EG
7010
7011 if (BP_E1HVN(bp) == 0) {
7012 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7013 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7014 } else {
7015 /* no WOL capability for E1HVN != 0 */
7016 bp->flags |= NO_WOL_FLAG;
7017 }
7018 BNX2X_DEV_INFO("%sWoL capable\n",
7019 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7020
7021 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7022 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7023 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7024 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7025
7026 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7027 val, val2, val3, val4);
7028}
7029
7030static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7031 u32 switch_cfg)
a2fbb9ea 7032{
34f80b04 7033 int port = BP_PORT(bp);
a2fbb9ea
ET
7034 u32 ext_phy_type;
7035
a2fbb9ea
ET
7036 switch (switch_cfg) {
7037 case SWITCH_CFG_1G:
7038 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7039
c18487ee
YR
7040 ext_phy_type =
7041 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7042 switch (ext_phy_type) {
7043 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7044 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7045 ext_phy_type);
7046
34f80b04
EG
7047 bp->port.supported |= (SUPPORTED_10baseT_Half |
7048 SUPPORTED_10baseT_Full |
7049 SUPPORTED_100baseT_Half |
7050 SUPPORTED_100baseT_Full |
7051 SUPPORTED_1000baseT_Full |
7052 SUPPORTED_2500baseX_Full |
7053 SUPPORTED_TP |
7054 SUPPORTED_FIBRE |
7055 SUPPORTED_Autoneg |
7056 SUPPORTED_Pause |
7057 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7058 break;
7059
7060 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7061 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7062 ext_phy_type);
7063
34f80b04
EG
7064 bp->port.supported |= (SUPPORTED_10baseT_Half |
7065 SUPPORTED_10baseT_Full |
7066 SUPPORTED_100baseT_Half |
7067 SUPPORTED_100baseT_Full |
7068 SUPPORTED_1000baseT_Full |
7069 SUPPORTED_TP |
7070 SUPPORTED_FIBRE |
7071 SUPPORTED_Autoneg |
7072 SUPPORTED_Pause |
7073 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7074 break;
7075
7076 default:
7077 BNX2X_ERR("NVRAM config error. "
7078 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7079 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7080 return;
7081 }
7082
34f80b04
EG
7083 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7084 port*0x10);
7085 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7086 break;
7087
7088 case SWITCH_CFG_10G:
7089 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7090
c18487ee
YR
7091 ext_phy_type =
7092 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7093 switch (ext_phy_type) {
7094 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7095 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7096 ext_phy_type);
7097
34f80b04
EG
7098 bp->port.supported |= (SUPPORTED_10baseT_Half |
7099 SUPPORTED_10baseT_Full |
7100 SUPPORTED_100baseT_Half |
7101 SUPPORTED_100baseT_Full |
7102 SUPPORTED_1000baseT_Full |
7103 SUPPORTED_2500baseX_Full |
7104 SUPPORTED_10000baseT_Full |
7105 SUPPORTED_TP |
7106 SUPPORTED_FIBRE |
7107 SUPPORTED_Autoneg |
7108 SUPPORTED_Pause |
7109 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7110 break;
7111
7112 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7113 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7114 ext_phy_type);
f1410647 7115
34f80b04
EG
7116 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7117 SUPPORTED_FIBRE |
7118 SUPPORTED_Pause |
7119 SUPPORTED_Asym_Pause);
f1410647
ET
7120 break;
7121
a2fbb9ea 7122 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7123 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7124 ext_phy_type);
7125
34f80b04
EG
7126 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7127 SUPPORTED_1000baseT_Full |
7128 SUPPORTED_FIBRE |
7129 SUPPORTED_Pause |
7130 SUPPORTED_Asym_Pause);
f1410647
ET
7131 break;
7132
7133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7134 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7135 ext_phy_type);
7136
34f80b04
EG
7137 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7138 SUPPORTED_1000baseT_Full |
7139 SUPPORTED_FIBRE |
7140 SUPPORTED_Autoneg |
7141 SUPPORTED_Pause |
7142 SUPPORTED_Asym_Pause);
f1410647
ET
7143 break;
7144
c18487ee
YR
7145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7146 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7147 ext_phy_type);
7148
34f80b04
EG
7149 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7150 SUPPORTED_2500baseX_Full |
7151 SUPPORTED_1000baseT_Full |
7152 SUPPORTED_FIBRE |
7153 SUPPORTED_Autoneg |
7154 SUPPORTED_Pause |
7155 SUPPORTED_Asym_Pause);
c18487ee
YR
7156 break;
7157
f1410647
ET
7158 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7159 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7160 ext_phy_type);
7161
34f80b04
EG
7162 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7163 SUPPORTED_TP |
7164 SUPPORTED_Autoneg |
7165 SUPPORTED_Pause |
7166 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7167 break;
7168
c18487ee
YR
7169 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7170 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7171 bp->link_params.ext_phy_config);
7172 break;
7173
a2fbb9ea
ET
7174 default:
7175 BNX2X_ERR("NVRAM config error. "
7176 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7177 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7178 return;
7179 }
7180
34f80b04
EG
7181 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7182 port*0x18);
7183 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7184
a2fbb9ea
ET
7185 break;
7186
7187 default:
7188 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7189 bp->port.link_config);
a2fbb9ea
ET
7190 return;
7191 }
34f80b04 7192 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7193
7194 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7195 if (!(bp->link_params.speed_cap_mask &
7196 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7197 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7198
c18487ee
YR
7199 if (!(bp->link_params.speed_cap_mask &
7200 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7201 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7202
c18487ee
YR
7203 if (!(bp->link_params.speed_cap_mask &
7204 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7205 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7206
c18487ee
YR
7207 if (!(bp->link_params.speed_cap_mask &
7208 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7209 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7210
c18487ee
YR
7211 if (!(bp->link_params.speed_cap_mask &
7212 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7213 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7214 SUPPORTED_1000baseT_Full);
a2fbb9ea 7215
c18487ee
YR
7216 if (!(bp->link_params.speed_cap_mask &
7217 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7218 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7219
c18487ee
YR
7220 if (!(bp->link_params.speed_cap_mask &
7221 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7222 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7223
34f80b04 7224 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7225}
7226
34f80b04 7227static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7228{
c18487ee 7229 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7230
34f80b04 7231 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7232 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7233 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7234 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7235 bp->port.advertising = bp->port.supported;
a2fbb9ea 7236 } else {
c18487ee
YR
7237 u32 ext_phy_type =
7238 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7239
7240 if ((ext_phy_type ==
7241 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7242 (ext_phy_type ==
7243 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7244 /* force 10G, no AN */
c18487ee 7245 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7246 bp->port.advertising =
a2fbb9ea
ET
7247 (ADVERTISED_10000baseT_Full |
7248 ADVERTISED_FIBRE);
7249 break;
7250 }
7251 BNX2X_ERR("NVRAM config error. "
7252 "Invalid link_config 0x%x"
7253 " Autoneg not supported\n",
34f80b04 7254 bp->port.link_config);
a2fbb9ea
ET
7255 return;
7256 }
7257 break;
7258
7259 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7260 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7261 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7262 bp->port.advertising = (ADVERTISED_10baseT_Full |
7263 ADVERTISED_TP);
a2fbb9ea
ET
7264 } else {
7265 BNX2X_ERR("NVRAM config error. "
7266 "Invalid link_config 0x%x"
7267 " speed_cap_mask 0x%x\n",
34f80b04 7268 bp->port.link_config,
c18487ee 7269 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7270 return;
7271 }
7272 break;
7273
7274 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7275 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7276 bp->link_params.req_line_speed = SPEED_10;
7277 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7278 bp->port.advertising = (ADVERTISED_10baseT_Half |
7279 ADVERTISED_TP);
a2fbb9ea
ET
7280 } else {
7281 BNX2X_ERR("NVRAM config error. "
7282 "Invalid link_config 0x%x"
7283 " speed_cap_mask 0x%x\n",
34f80b04 7284 bp->port.link_config,
c18487ee 7285 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7286 return;
7287 }
7288 break;
7289
7290 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7291 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7292 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7293 bp->port.advertising = (ADVERTISED_100baseT_Full |
7294 ADVERTISED_TP);
a2fbb9ea
ET
7295 } else {
7296 BNX2X_ERR("NVRAM config error. "
7297 "Invalid link_config 0x%x"
7298 " speed_cap_mask 0x%x\n",
34f80b04 7299 bp->port.link_config,
c18487ee 7300 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7301 return;
7302 }
7303 break;
7304
7305 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7306 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7307 bp->link_params.req_line_speed = SPEED_100;
7308 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7309 bp->port.advertising = (ADVERTISED_100baseT_Half |
7310 ADVERTISED_TP);
a2fbb9ea
ET
7311 } else {
7312 BNX2X_ERR("NVRAM config error. "
7313 "Invalid link_config 0x%x"
7314 " speed_cap_mask 0x%x\n",
34f80b04 7315 bp->port.link_config,
c18487ee 7316 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7317 return;
7318 }
7319 break;
7320
7321 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7322 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7323 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7324 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7325 ADVERTISED_TP);
a2fbb9ea
ET
7326 } else {
7327 BNX2X_ERR("NVRAM config error. "
7328 "Invalid link_config 0x%x"
7329 " speed_cap_mask 0x%x\n",
34f80b04 7330 bp->port.link_config,
c18487ee 7331 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7332 return;
7333 }
7334 break;
7335
7336 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7337 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7338 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7339 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7340 ADVERTISED_TP);
a2fbb9ea
ET
7341 } else {
7342 BNX2X_ERR("NVRAM config error. "
7343 "Invalid link_config 0x%x"
7344 " speed_cap_mask 0x%x\n",
34f80b04 7345 bp->port.link_config,
c18487ee 7346 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7347 return;
7348 }
7349 break;
7350
7351 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7352 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7353 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7354 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7355 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7356 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7357 ADVERTISED_FIBRE);
a2fbb9ea
ET
7358 } else {
7359 BNX2X_ERR("NVRAM config error. "
7360 "Invalid link_config 0x%x"
7361 " speed_cap_mask 0x%x\n",
34f80b04 7362 bp->port.link_config,
c18487ee 7363 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7364 return;
7365 }
7366 break;
7367
7368 default:
7369 BNX2X_ERR("NVRAM config error. "
7370 "BAD link speed link_config 0x%x\n",
34f80b04 7371 bp->port.link_config);
c18487ee 7372 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7373 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7374 break;
7375 }
a2fbb9ea 7376
34f80b04
EG
7377 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7378 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7379 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7380 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7381 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7382
c18487ee 7383 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7384 " advertising 0x%x\n",
c18487ee
YR
7385 bp->link_params.req_line_speed,
7386 bp->link_params.req_duplex,
34f80b04 7387 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7388}
7389
34f80b04 7390static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7391{
34f80b04
EG
7392 int port = BP_PORT(bp);
7393 u32 val, val2;
a2fbb9ea 7394
c18487ee 7395 bp->link_params.bp = bp;
34f80b04 7396 bp->link_params.port = port;
c18487ee 7397
c18487ee 7398 bp->link_params.serdes_config =
f1410647 7399 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7400 bp->link_params.lane_config =
a2fbb9ea 7401 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7402 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7403 SHMEM_RD(bp,
7404 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7405 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7406 SHMEM_RD(bp,
7407 dev_info.port_hw_config[port].speed_capability_mask);
7408
34f80b04 7409 bp->port.link_config =
a2fbb9ea
ET
7410 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7411
34f80b04
EG
7412 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7413 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7414 " link_config 0x%08x\n",
c18487ee
YR
7415 bp->link_params.serdes_config,
7416 bp->link_params.lane_config,
7417 bp->link_params.ext_phy_config,
34f80b04 7418 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7419
34f80b04 7420 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7421 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7422 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7423
7424 bnx2x_link_settings_requested(bp);
7425
7426 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7427 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7428 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7429 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7430 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7431 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7432 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7433 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7434 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7435 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7436}
7437
7438static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7439{
7440 int func = BP_FUNC(bp);
7441 u32 val, val2;
7442 int rc = 0;
a2fbb9ea 7443
34f80b04 7444 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7445
34f80b04
EG
7446 bp->e1hov = 0;
7447 bp->e1hmf = 0;
7448 if (CHIP_IS_E1H(bp)) {
7449 bp->mf_config =
7450 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7451
3196a88a
EG
7452 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7453 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7454 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7455
34f80b04
EG
7456 bp->e1hov = val;
7457 bp->e1hmf = 1;
7458 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7459 "(0x%04x)\n",
7460 func, bp->e1hov, bp->e1hov);
7461 } else {
7462 BNX2X_DEV_INFO("Single function mode\n");
7463 if (BP_E1HVN(bp)) {
7464 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7465 " aborting\n", func);
7466 rc = -EPERM;
7467 }
7468 }
7469 }
a2fbb9ea 7470
34f80b04
EG
7471 if (!BP_NOMCP(bp)) {
7472 bnx2x_get_port_hwinfo(bp);
7473
7474 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7475 DRV_MSG_SEQ_NUMBER_MASK);
7476 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7477 }
7478
7479 if (IS_E1HMF(bp)) {
7480 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7481 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7482 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7483 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7484 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7485 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7486 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7487 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7488 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7489 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7490 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7491 ETH_ALEN);
7492 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7493 ETH_ALEN);
a2fbb9ea 7494 }
34f80b04
EG
7495
7496 return rc;
a2fbb9ea
ET
7497 }
7498
34f80b04
EG
7499 if (BP_NOMCP(bp)) {
7500 /* only supposed to happen on emulation/FPGA */
33471629 7501 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7502 random_ether_addr(bp->dev->dev_addr);
7503 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7504 }
a2fbb9ea 7505
34f80b04
EG
7506 return rc;
7507}
7508
7509static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7510{
7511 int func = BP_FUNC(bp);
7512 int rc;
7513
da5a662a
VZ
7514 /* Disable interrupt handling until HW is initialized */
7515 atomic_set(&bp->intr_sem, 1);
7516
34f80b04 7517 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7518
1cf167f2 7519 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7520 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7521
7522 rc = bnx2x_get_hwinfo(bp);
7523
7524 /* need to reset chip if undi was active */
7525 if (!BP_NOMCP(bp))
7526 bnx2x_undi_unload(bp);
7527
7528 if (CHIP_REV_IS_FPGA(bp))
7529 printk(KERN_ERR PFX "FPGA detected\n");
7530
7531 if (BP_NOMCP(bp) && (func == 0))
7532 printk(KERN_ERR PFX
7533 "MCP disabled, must load devices in order!\n");
7534
7a9b2557
VZ
7535 /* Set TPA flags */
7536 if (disable_tpa) {
7537 bp->flags &= ~TPA_ENABLE_FLAG;
7538 bp->dev->features &= ~NETIF_F_LRO;
7539 } else {
7540 bp->flags |= TPA_ENABLE_FLAG;
7541 bp->dev->features |= NETIF_F_LRO;
7542 }
7543
7544
34f80b04
EG
7545 bp->tx_ring_size = MAX_TX_AVAIL;
7546 bp->rx_ring_size = MAX_RX_AVAIL;
7547
7548 bp->rx_csum = 1;
7549 bp->rx_offset = 0;
7550
7551 bp->tx_ticks = 50;
7552 bp->rx_ticks = 25;
7553
34f80b04
EG
7554 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7555 bp->current_interval = (poll ? poll : bp->timer_interval);
7556
7557 init_timer(&bp->timer);
7558 bp->timer.expires = jiffies + bp->current_interval;
7559 bp->timer.data = (unsigned long) bp;
7560 bp->timer.function = bnx2x_timer;
7561
7562 return rc;
a2fbb9ea
ET
7563}
7564
7565/*
7566 * ethtool service functions
7567 */
7568
7569/* All ethtool functions called with rtnl_lock */
7570
7571static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7572{
7573 struct bnx2x *bp = netdev_priv(dev);
7574
34f80b04
EG
7575 cmd->supported = bp->port.supported;
7576 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7577
7578 if (netif_carrier_ok(dev)) {
c18487ee
YR
7579 cmd->speed = bp->link_vars.line_speed;
7580 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7581 } else {
c18487ee
YR
7582 cmd->speed = bp->link_params.req_line_speed;
7583 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7584 }
34f80b04
EG
7585 if (IS_E1HMF(bp)) {
7586 u16 vn_max_rate;
7587
7588 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7589 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7590 if (vn_max_rate < cmd->speed)
7591 cmd->speed = vn_max_rate;
7592 }
a2fbb9ea 7593
c18487ee
YR
7594 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7595 u32 ext_phy_type =
7596 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7597
7598 switch (ext_phy_type) {
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7600 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7603 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7604 cmd->port = PORT_FIBRE;
7605 break;
7606
7607 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7608 cmd->port = PORT_TP;
7609 break;
7610
c18487ee
YR
7611 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7612 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7613 bp->link_params.ext_phy_config);
7614 break;
7615
f1410647
ET
7616 default:
7617 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7618 bp->link_params.ext_phy_config);
7619 break;
f1410647
ET
7620 }
7621 } else
a2fbb9ea 7622 cmd->port = PORT_TP;
a2fbb9ea 7623
34f80b04 7624 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7625 cmd->transceiver = XCVR_INTERNAL;
7626
c18487ee 7627 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7628 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7629 else
a2fbb9ea 7630 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7631
7632 cmd->maxtxpkt = 0;
7633 cmd->maxrxpkt = 0;
7634
7635 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7636 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7637 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7638 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7639 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7640 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7641 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7642
7643 return 0;
7644}
7645
7646static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7647{
7648 struct bnx2x *bp = netdev_priv(dev);
7649 u32 advertising;
7650
34f80b04
EG
7651 if (IS_E1HMF(bp))
7652 return 0;
7653
a2fbb9ea
ET
7654 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7655 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7656 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7657 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7658 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7659 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7660 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7661
a2fbb9ea 7662 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7663 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7664 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7665 return -EINVAL;
f1410647 7666 }
a2fbb9ea
ET
7667
7668 /* advertise the requested speed and duplex if supported */
34f80b04 7669 cmd->advertising &= bp->port.supported;
a2fbb9ea 7670
c18487ee
YR
7671 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7672 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7673 bp->port.advertising |= (ADVERTISED_Autoneg |
7674 cmd->advertising);
a2fbb9ea
ET
7675
7676 } else { /* forced speed */
7677 /* advertise the requested speed and duplex if supported */
7678 switch (cmd->speed) {
7679 case SPEED_10:
7680 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7681 if (!(bp->port.supported &
f1410647
ET
7682 SUPPORTED_10baseT_Full)) {
7683 DP(NETIF_MSG_LINK,
7684 "10M full not supported\n");
a2fbb9ea 7685 return -EINVAL;
f1410647 7686 }
a2fbb9ea
ET
7687
7688 advertising = (ADVERTISED_10baseT_Full |
7689 ADVERTISED_TP);
7690 } else {
34f80b04 7691 if (!(bp->port.supported &
f1410647
ET
7692 SUPPORTED_10baseT_Half)) {
7693 DP(NETIF_MSG_LINK,
7694 "10M half not supported\n");
a2fbb9ea 7695 return -EINVAL;
f1410647 7696 }
a2fbb9ea
ET
7697
7698 advertising = (ADVERTISED_10baseT_Half |
7699 ADVERTISED_TP);
7700 }
7701 break;
7702
7703 case SPEED_100:
7704 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7705 if (!(bp->port.supported &
f1410647
ET
7706 SUPPORTED_100baseT_Full)) {
7707 DP(NETIF_MSG_LINK,
7708 "100M full not supported\n");
a2fbb9ea 7709 return -EINVAL;
f1410647 7710 }
a2fbb9ea
ET
7711
7712 advertising = (ADVERTISED_100baseT_Full |
7713 ADVERTISED_TP);
7714 } else {
34f80b04 7715 if (!(bp->port.supported &
f1410647
ET
7716 SUPPORTED_100baseT_Half)) {
7717 DP(NETIF_MSG_LINK,
7718 "100M half not supported\n");
a2fbb9ea 7719 return -EINVAL;
f1410647 7720 }
a2fbb9ea
ET
7721
7722 advertising = (ADVERTISED_100baseT_Half |
7723 ADVERTISED_TP);
7724 }
7725 break;
7726
7727 case SPEED_1000:
f1410647
ET
7728 if (cmd->duplex != DUPLEX_FULL) {
7729 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7730 return -EINVAL;
f1410647 7731 }
a2fbb9ea 7732
34f80b04 7733 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7734 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7735 return -EINVAL;
f1410647 7736 }
a2fbb9ea
ET
7737
7738 advertising = (ADVERTISED_1000baseT_Full |
7739 ADVERTISED_TP);
7740 break;
7741
7742 case SPEED_2500:
f1410647
ET
7743 if (cmd->duplex != DUPLEX_FULL) {
7744 DP(NETIF_MSG_LINK,
7745 "2.5G half not supported\n");
a2fbb9ea 7746 return -EINVAL;
f1410647 7747 }
a2fbb9ea 7748
34f80b04 7749 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7750 DP(NETIF_MSG_LINK,
7751 "2.5G full not supported\n");
a2fbb9ea 7752 return -EINVAL;
f1410647 7753 }
a2fbb9ea 7754
f1410647 7755 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7756 ADVERTISED_TP);
7757 break;
7758
7759 case SPEED_10000:
f1410647
ET
7760 if (cmd->duplex != DUPLEX_FULL) {
7761 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7762 return -EINVAL;
f1410647 7763 }
a2fbb9ea 7764
34f80b04 7765 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7766 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7767 return -EINVAL;
f1410647 7768 }
a2fbb9ea
ET
7769
7770 advertising = (ADVERTISED_10000baseT_Full |
7771 ADVERTISED_FIBRE);
7772 break;
7773
7774 default:
f1410647 7775 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7776 return -EINVAL;
7777 }
7778
c18487ee
YR
7779 bp->link_params.req_line_speed = cmd->speed;
7780 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7781 bp->port.advertising = advertising;
a2fbb9ea
ET
7782 }
7783
c18487ee 7784 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7785 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7786 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7787 bp->port.advertising);
a2fbb9ea 7788
34f80b04 7789 if (netif_running(dev)) {
bb2a0f7a 7790 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7791 bnx2x_link_set(bp);
7792 }
a2fbb9ea
ET
7793
7794 return 0;
7795}
7796
c18487ee
YR
7797#define PHY_FW_VER_LEN 10
7798
a2fbb9ea
ET
7799static void bnx2x_get_drvinfo(struct net_device *dev,
7800 struct ethtool_drvinfo *info)
7801{
7802 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7803 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7804
7805 strcpy(info->driver, DRV_MODULE_NAME);
7806 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7807
7808 phy_fw_ver[0] = '\0';
34f80b04 7809 if (bp->port.pmf) {
4a37fb66 7810 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7811 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7812 (bp->state != BNX2X_STATE_CLOSED),
7813 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7814 bnx2x_release_phy_lock(bp);
34f80b04 7815 }
c18487ee 7816
f0e53a84
EG
7817 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7818 (bp->common.bc_ver & 0xff0000) >> 16,
7819 (bp->common.bc_ver & 0xff00) >> 8,
7820 (bp->common.bc_ver & 0xff),
7821 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7822 strcpy(info->bus_info, pci_name(bp->pdev));
7823 info->n_stats = BNX2X_NUM_STATS;
7824 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7825 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7826 info->regdump_len = 0;
7827}
7828
7829static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7830{
7831 struct bnx2x *bp = netdev_priv(dev);
7832
7833 if (bp->flags & NO_WOL_FLAG) {
7834 wol->supported = 0;
7835 wol->wolopts = 0;
7836 } else {
7837 wol->supported = WAKE_MAGIC;
7838 if (bp->wol)
7839 wol->wolopts = WAKE_MAGIC;
7840 else
7841 wol->wolopts = 0;
7842 }
7843 memset(&wol->sopass, 0, sizeof(wol->sopass));
7844}
7845
7846static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7847{
7848 struct bnx2x *bp = netdev_priv(dev);
7849
7850 if (wol->wolopts & ~WAKE_MAGIC)
7851 return -EINVAL;
7852
7853 if (wol->wolopts & WAKE_MAGIC) {
7854 if (bp->flags & NO_WOL_FLAG)
7855 return -EINVAL;
7856
7857 bp->wol = 1;
34f80b04 7858 } else
a2fbb9ea 7859 bp->wol = 0;
34f80b04 7860
a2fbb9ea
ET
7861 return 0;
7862}
7863
7864static u32 bnx2x_get_msglevel(struct net_device *dev)
7865{
7866 struct bnx2x *bp = netdev_priv(dev);
7867
7868 return bp->msglevel;
7869}
7870
7871static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7872{
7873 struct bnx2x *bp = netdev_priv(dev);
7874
7875 if (capable(CAP_NET_ADMIN))
7876 bp->msglevel = level;
7877}
7878
7879static int bnx2x_nway_reset(struct net_device *dev)
7880{
7881 struct bnx2x *bp = netdev_priv(dev);
7882
34f80b04
EG
7883 if (!bp->port.pmf)
7884 return 0;
a2fbb9ea 7885
34f80b04 7886 if (netif_running(dev)) {
bb2a0f7a 7887 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7888 bnx2x_link_set(bp);
7889 }
a2fbb9ea
ET
7890
7891 return 0;
7892}
7893
7894static int bnx2x_get_eeprom_len(struct net_device *dev)
7895{
7896 struct bnx2x *bp = netdev_priv(dev);
7897
34f80b04 7898 return bp->common.flash_size;
a2fbb9ea
ET
7899}
7900
7901static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7902{
34f80b04 7903 int port = BP_PORT(bp);
a2fbb9ea
ET
7904 int count, i;
7905 u32 val = 0;
7906
7907 /* adjust timeout for emulation/FPGA */
7908 count = NVRAM_TIMEOUT_COUNT;
7909 if (CHIP_REV_IS_SLOW(bp))
7910 count *= 100;
7911
7912 /* request access to nvram interface */
7913 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7914 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7915
7916 for (i = 0; i < count*10; i++) {
7917 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7918 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7919 break;
7920
7921 udelay(5);
7922 }
7923
7924 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7925 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7926 return -EBUSY;
7927 }
7928
7929 return 0;
7930}
7931
7932static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7933{
34f80b04 7934 int port = BP_PORT(bp);
a2fbb9ea
ET
7935 int count, i;
7936 u32 val = 0;
7937
7938 /* adjust timeout for emulation/FPGA */
7939 count = NVRAM_TIMEOUT_COUNT;
7940 if (CHIP_REV_IS_SLOW(bp))
7941 count *= 100;
7942
7943 /* relinquish nvram interface */
7944 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7945 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7946
7947 for (i = 0; i < count*10; i++) {
7948 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7949 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7950 break;
7951
7952 udelay(5);
7953 }
7954
7955 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7956 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7957 return -EBUSY;
7958 }
7959
7960 return 0;
7961}
7962
7963static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7964{
7965 u32 val;
7966
7967 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7968
7969 /* enable both bits, even on read */
7970 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7971 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7972 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7973}
7974
7975static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7976{
7977 u32 val;
7978
7979 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7980
7981 /* disable both bits, even after read */
7982 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7983 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7984 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7985}
7986
7987static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7988 u32 cmd_flags)
7989{
f1410647 7990 int count, i, rc;
a2fbb9ea
ET
7991 u32 val;
7992
7993 /* build the command word */
7994 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7995
7996 /* need to clear DONE bit separately */
7997 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7998
7999 /* address of the NVRAM to read from */
8000 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8001 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8002
8003 /* issue a read command */
8004 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8005
8006 /* adjust timeout for emulation/FPGA */
8007 count = NVRAM_TIMEOUT_COUNT;
8008 if (CHIP_REV_IS_SLOW(bp))
8009 count *= 100;
8010
8011 /* wait for completion */
8012 *ret_val = 0;
8013 rc = -EBUSY;
8014 for (i = 0; i < count; i++) {
8015 udelay(5);
8016 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8017
8018 if (val & MCPR_NVM_COMMAND_DONE) {
8019 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8020 /* we read nvram data in cpu order
8021 * but ethtool sees it as an array of bytes
8022 * converting to big-endian will do the work */
8023 val = cpu_to_be32(val);
8024 *ret_val = val;
8025 rc = 0;
8026 break;
8027 }
8028 }
8029
8030 return rc;
8031}
8032
8033static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8034 int buf_size)
8035{
8036 int rc;
8037 u32 cmd_flags;
8038 u32 val;
8039
8040 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8041 DP(BNX2X_MSG_NVM,
c14423fe 8042 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8043 offset, buf_size);
8044 return -EINVAL;
8045 }
8046
34f80b04
EG
8047 if (offset + buf_size > bp->common.flash_size) {
8048 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8049 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8050 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8051 return -EINVAL;
8052 }
8053
8054 /* request access to nvram interface */
8055 rc = bnx2x_acquire_nvram_lock(bp);
8056 if (rc)
8057 return rc;
8058
8059 /* enable access to nvram interface */
8060 bnx2x_enable_nvram_access(bp);
8061
8062 /* read the first word(s) */
8063 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8064 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8065 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8066 memcpy(ret_buf, &val, 4);
8067
8068 /* advance to the next dword */
8069 offset += sizeof(u32);
8070 ret_buf += sizeof(u32);
8071 buf_size -= sizeof(u32);
8072 cmd_flags = 0;
8073 }
8074
8075 if (rc == 0) {
8076 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8077 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8078 memcpy(ret_buf, &val, 4);
8079 }
8080
8081 /* disable access to nvram interface */
8082 bnx2x_disable_nvram_access(bp);
8083 bnx2x_release_nvram_lock(bp);
8084
8085 return rc;
8086}
8087
8088static int bnx2x_get_eeprom(struct net_device *dev,
8089 struct ethtool_eeprom *eeprom, u8 *eebuf)
8090{
8091 struct bnx2x *bp = netdev_priv(dev);
8092 int rc;
8093
34f80b04 8094 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8095 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8096 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8097 eeprom->len, eeprom->len);
8098
8099 /* parameters already validated in ethtool_get_eeprom */
8100
8101 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8102
8103 return rc;
8104}
8105
8106static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8107 u32 cmd_flags)
8108{
f1410647 8109 int count, i, rc;
a2fbb9ea
ET
8110
8111 /* build the command word */
8112 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8113
8114 /* need to clear DONE bit separately */
8115 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8116
8117 /* write the data */
8118 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8119
8120 /* address of the NVRAM to write to */
8121 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8122 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8123
8124 /* issue the write command */
8125 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8126
8127 /* adjust timeout for emulation/FPGA */
8128 count = NVRAM_TIMEOUT_COUNT;
8129 if (CHIP_REV_IS_SLOW(bp))
8130 count *= 100;
8131
8132 /* wait for completion */
8133 rc = -EBUSY;
8134 for (i = 0; i < count; i++) {
8135 udelay(5);
8136 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8137 if (val & MCPR_NVM_COMMAND_DONE) {
8138 rc = 0;
8139 break;
8140 }
8141 }
8142
8143 return rc;
8144}
8145
f1410647 8146#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8147
8148static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8149 int buf_size)
8150{
8151 int rc;
8152 u32 cmd_flags;
8153 u32 align_offset;
8154 u32 val;
8155
34f80b04
EG
8156 if (offset + buf_size > bp->common.flash_size) {
8157 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8158 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8159 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8160 return -EINVAL;
8161 }
8162
8163 /* request access to nvram interface */
8164 rc = bnx2x_acquire_nvram_lock(bp);
8165 if (rc)
8166 return rc;
8167
8168 /* enable access to nvram interface */
8169 bnx2x_enable_nvram_access(bp);
8170
8171 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8172 align_offset = (offset & ~0x03);
8173 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8174
8175 if (rc == 0) {
8176 val &= ~(0xff << BYTE_OFFSET(offset));
8177 val |= (*data_buf << BYTE_OFFSET(offset));
8178
8179 /* nvram data is returned as an array of bytes
8180 * convert it back to cpu order */
8181 val = be32_to_cpu(val);
8182
a2fbb9ea
ET
8183 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8184 cmd_flags);
8185 }
8186
8187 /* disable access to nvram interface */
8188 bnx2x_disable_nvram_access(bp);
8189 bnx2x_release_nvram_lock(bp);
8190
8191 return rc;
8192}
8193
8194static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8195 int buf_size)
8196{
8197 int rc;
8198 u32 cmd_flags;
8199 u32 val;
8200 u32 written_so_far;
8201
34f80b04 8202 if (buf_size == 1) /* ethtool */
a2fbb9ea 8203 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8204
8205 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8206 DP(BNX2X_MSG_NVM,
c14423fe 8207 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8208 offset, buf_size);
8209 return -EINVAL;
8210 }
8211
34f80b04
EG
8212 if (offset + buf_size > bp->common.flash_size) {
8213 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8214 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8215 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8216 return -EINVAL;
8217 }
8218
8219 /* request access to nvram interface */
8220 rc = bnx2x_acquire_nvram_lock(bp);
8221 if (rc)
8222 return rc;
8223
8224 /* enable access to nvram interface */
8225 bnx2x_enable_nvram_access(bp);
8226
8227 written_so_far = 0;
8228 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8229 while ((written_so_far < buf_size) && (rc == 0)) {
8230 if (written_so_far == (buf_size - sizeof(u32)))
8231 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8232 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8233 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8234 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8235 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8236
8237 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8238
8239 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8240
8241 /* advance to the next dword */
8242 offset += sizeof(u32);
8243 data_buf += sizeof(u32);
8244 written_so_far += sizeof(u32);
8245 cmd_flags = 0;
8246 }
8247
8248 /* disable access to nvram interface */
8249 bnx2x_disable_nvram_access(bp);
8250 bnx2x_release_nvram_lock(bp);
8251
8252 return rc;
8253}
8254
8255static int bnx2x_set_eeprom(struct net_device *dev,
8256 struct ethtool_eeprom *eeprom, u8 *eebuf)
8257{
8258 struct bnx2x *bp = netdev_priv(dev);
8259 int rc;
8260
9f4c9583
EG
8261 if (!netif_running(dev))
8262 return -EAGAIN;
8263
34f80b04 8264 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8265 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8266 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8267 eeprom->len, eeprom->len);
8268
8269 /* parameters already validated in ethtool_set_eeprom */
8270
c18487ee 8271 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8272 if (eeprom->magic == 0x00504859)
8273 if (bp->port.pmf) {
8274
4a37fb66 8275 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8276 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8277 bp->link_params.ext_phy_config,
8278 (bp->state != BNX2X_STATE_CLOSED),
8279 eebuf, eeprom->len);
bb2a0f7a
YG
8280 if ((bp->state == BNX2X_STATE_OPEN) ||
8281 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8282 rc |= bnx2x_link_reset(&bp->link_params,
8283 &bp->link_vars);
8284 rc |= bnx2x_phy_init(&bp->link_params,
8285 &bp->link_vars);
bb2a0f7a 8286 }
4a37fb66 8287 bnx2x_release_phy_lock(bp);
34f80b04
EG
8288
8289 } else /* Only the PMF can access the PHY */
8290 return -EINVAL;
8291 else
c18487ee 8292 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8293
8294 return rc;
8295}
8296
8297static int bnx2x_get_coalesce(struct net_device *dev,
8298 struct ethtool_coalesce *coal)
8299{
8300 struct bnx2x *bp = netdev_priv(dev);
8301
8302 memset(coal, 0, sizeof(struct ethtool_coalesce));
8303
8304 coal->rx_coalesce_usecs = bp->rx_ticks;
8305 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8306
8307 return 0;
8308}
8309
8310static int bnx2x_set_coalesce(struct net_device *dev,
8311 struct ethtool_coalesce *coal)
8312{
8313 struct bnx2x *bp = netdev_priv(dev);
8314
8315 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8316 if (bp->rx_ticks > 3000)
8317 bp->rx_ticks = 3000;
8318
8319 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8320 if (bp->tx_ticks > 0x3000)
8321 bp->tx_ticks = 0x3000;
8322
34f80b04 8323 if (netif_running(dev))
a2fbb9ea
ET
8324 bnx2x_update_coalesce(bp);
8325
8326 return 0;
8327}
8328
8329static void bnx2x_get_ringparam(struct net_device *dev,
8330 struct ethtool_ringparam *ering)
8331{
8332 struct bnx2x *bp = netdev_priv(dev);
8333
8334 ering->rx_max_pending = MAX_RX_AVAIL;
8335 ering->rx_mini_max_pending = 0;
8336 ering->rx_jumbo_max_pending = 0;
8337
8338 ering->rx_pending = bp->rx_ring_size;
8339 ering->rx_mini_pending = 0;
8340 ering->rx_jumbo_pending = 0;
8341
8342 ering->tx_max_pending = MAX_TX_AVAIL;
8343 ering->tx_pending = bp->tx_ring_size;
8344}
8345
8346static int bnx2x_set_ringparam(struct net_device *dev,
8347 struct ethtool_ringparam *ering)
8348{
8349 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8350 int rc = 0;
a2fbb9ea
ET
8351
8352 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8353 (ering->tx_pending > MAX_TX_AVAIL) ||
8354 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8355 return -EINVAL;
8356
8357 bp->rx_ring_size = ering->rx_pending;
8358 bp->tx_ring_size = ering->tx_pending;
8359
34f80b04
EG
8360 if (netif_running(dev)) {
8361 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8362 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8363 }
8364
34f80b04 8365 return rc;
a2fbb9ea
ET
8366}
8367
8368static void bnx2x_get_pauseparam(struct net_device *dev,
8369 struct ethtool_pauseparam *epause)
8370{
8371 struct bnx2x *bp = netdev_priv(dev);
8372
c0700f90 8373 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8374 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8375
c0700f90
DM
8376 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8377 BNX2X_FLOW_CTRL_RX);
8378 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8379 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8380
8381 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8382 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8383 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8384}
8385
8386static int bnx2x_set_pauseparam(struct net_device *dev,
8387 struct ethtool_pauseparam *epause)
8388{
8389 struct bnx2x *bp = netdev_priv(dev);
8390
34f80b04
EG
8391 if (IS_E1HMF(bp))
8392 return 0;
8393
a2fbb9ea
ET
8394 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8395 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8396 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8397
c0700f90 8398 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8399
f1410647 8400 if (epause->rx_pause)
c0700f90 8401 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8402
f1410647 8403 if (epause->tx_pause)
c0700f90 8404 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8405
c0700f90
DM
8406 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8407 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8408
c18487ee 8409 if (epause->autoneg) {
34f80b04 8410 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8411 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8412 return -EINVAL;
8413 }
a2fbb9ea 8414
c18487ee 8415 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8416 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8417 }
a2fbb9ea 8418
c18487ee
YR
8419 DP(NETIF_MSG_LINK,
8420 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8421
8422 if (netif_running(dev)) {
bb2a0f7a 8423 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8424 bnx2x_link_set(bp);
8425 }
a2fbb9ea
ET
8426
8427 return 0;
8428}
8429
df0f2343
VZ
8430static int bnx2x_set_flags(struct net_device *dev, u32 data)
8431{
8432 struct bnx2x *bp = netdev_priv(dev);
8433 int changed = 0;
8434 int rc = 0;
8435
8436 /* TPA requires Rx CSUM offloading */
8437 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8438 if (!(dev->features & NETIF_F_LRO)) {
8439 dev->features |= NETIF_F_LRO;
8440 bp->flags |= TPA_ENABLE_FLAG;
8441 changed = 1;
8442 }
8443
8444 } else if (dev->features & NETIF_F_LRO) {
8445 dev->features &= ~NETIF_F_LRO;
8446 bp->flags &= ~TPA_ENABLE_FLAG;
8447 changed = 1;
8448 }
8449
8450 if (changed && netif_running(dev)) {
8451 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8452 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8453 }
8454
8455 return rc;
8456}
8457
a2fbb9ea
ET
8458static u32 bnx2x_get_rx_csum(struct net_device *dev)
8459{
8460 struct bnx2x *bp = netdev_priv(dev);
8461
8462 return bp->rx_csum;
8463}
8464
8465static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8466{
8467 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8468 int rc = 0;
a2fbb9ea
ET
8469
8470 bp->rx_csum = data;
df0f2343
VZ
8471
8472 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8473 TPA'ed packets will be discarded due to wrong TCP CSUM */
8474 if (!data) {
8475 u32 flags = ethtool_op_get_flags(dev);
8476
8477 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8478 }
8479
8480 return rc;
a2fbb9ea
ET
8481}
8482
8483static int bnx2x_set_tso(struct net_device *dev, u32 data)
8484{
755735eb 8485 if (data) {
a2fbb9ea 8486 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8487 dev->features |= NETIF_F_TSO6;
8488 } else {
a2fbb9ea 8489 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8490 dev->features &= ~NETIF_F_TSO6;
8491 }
8492
a2fbb9ea
ET
8493 return 0;
8494}
8495
f3c87cdd 8496static const struct {
a2fbb9ea
ET
8497 char string[ETH_GSTRING_LEN];
8498} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8499 { "register_test (offline)" },
8500 { "memory_test (offline)" },
8501 { "loopback_test (offline)" },
8502 { "nvram_test (online)" },
8503 { "interrupt_test (online)" },
8504 { "link_test (online)" },
8505 { "idle check (online)" },
8506 { "MC errors (online)" }
a2fbb9ea
ET
8507};
8508
8509static int bnx2x_self_test_count(struct net_device *dev)
8510{
8511 return BNX2X_NUM_TESTS;
8512}
8513
f3c87cdd
YG
8514static int bnx2x_test_registers(struct bnx2x *bp)
8515{
8516 int idx, i, rc = -ENODEV;
8517 u32 wr_val = 0;
9dabc424 8518 int port = BP_PORT(bp);
f3c87cdd
YG
8519 static const struct {
8520 u32 offset0;
8521 u32 offset1;
8522 u32 mask;
8523 } reg_tbl[] = {
8524/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8525 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8526 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8527 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8528 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8529 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8530 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8531 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8532 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8533 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8534/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8535 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8536 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8537 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8538 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8539 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8540 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8541 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8542 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8543 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8544/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8545 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8546 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8547 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8548 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8549 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8550 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8551 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8552 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8553 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8554/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8555 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8556 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8557 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8558 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8559 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8560 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8561 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8562
8563 { 0xffffffff, 0, 0x00000000 }
8564 };
8565
8566 if (!netif_running(bp->dev))
8567 return rc;
8568
8569 /* Repeat the test twice:
8570 First by writing 0x00000000, second by writing 0xffffffff */
8571 for (idx = 0; idx < 2; idx++) {
8572
8573 switch (idx) {
8574 case 0:
8575 wr_val = 0;
8576 break;
8577 case 1:
8578 wr_val = 0xffffffff;
8579 break;
8580 }
8581
8582 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8583 u32 offset, mask, save_val, val;
f3c87cdd
YG
8584
8585 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8586 mask = reg_tbl[i].mask;
8587
8588 save_val = REG_RD(bp, offset);
8589
8590 REG_WR(bp, offset, wr_val);
8591 val = REG_RD(bp, offset);
8592
8593 /* Restore the original register's value */
8594 REG_WR(bp, offset, save_val);
8595
8596 /* verify that value is as expected value */
8597 if ((val & mask) != (wr_val & mask))
8598 goto test_reg_exit;
8599 }
8600 }
8601
8602 rc = 0;
8603
8604test_reg_exit:
8605 return rc;
8606}
8607
8608static int bnx2x_test_memory(struct bnx2x *bp)
8609{
8610 int i, j, rc = -ENODEV;
8611 u32 val;
8612 static const struct {
8613 u32 offset;
8614 int size;
8615 } mem_tbl[] = {
8616 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8617 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8618 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8619 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8620 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8621 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8622 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8623
8624 { 0xffffffff, 0 }
8625 };
8626 static const struct {
8627 char *name;
8628 u32 offset;
9dabc424
YG
8629 u32 e1_mask;
8630 u32 e1h_mask;
f3c87cdd 8631 } prty_tbl[] = {
9dabc424
YG
8632 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8633 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8634 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8635 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8636 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8637 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8638
8639 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
8640 };
8641
8642 if (!netif_running(bp->dev))
8643 return rc;
8644
8645 /* Go through all the memories */
8646 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8647 for (j = 0; j < mem_tbl[i].size; j++)
8648 REG_RD(bp, mem_tbl[i].offset + j*4);
8649
8650 /* Check the parity status */
8651 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8652 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
8653 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8654 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
8655 DP(NETIF_MSG_HW,
8656 "%s is 0x%x\n", prty_tbl[i].name, val);
8657 goto test_mem_exit;
8658 }
8659 }
8660
8661 rc = 0;
8662
8663test_mem_exit:
8664 return rc;
8665}
8666
f3c87cdd
YG
8667static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8668{
8669 int cnt = 1000;
8670
8671 if (link_up)
8672 while (bnx2x_link_test(bp) && cnt--)
8673 msleep(10);
8674}
8675
8676static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8677{
8678 unsigned int pkt_size, num_pkts, i;
8679 struct sk_buff *skb;
8680 unsigned char *packet;
8681 struct bnx2x_fastpath *fp = &bp->fp[0];
8682 u16 tx_start_idx, tx_idx;
8683 u16 rx_start_idx, rx_idx;
8684 u16 pkt_prod;
8685 struct sw_tx_bd *tx_buf;
8686 struct eth_tx_bd *tx_bd;
8687 dma_addr_t mapping;
8688 union eth_rx_cqe *cqe;
8689 u8 cqe_fp_flags;
8690 struct sw_rx_bd *rx_buf;
8691 u16 len;
8692 int rc = -ENODEV;
8693
8694 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8695 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8696 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8697 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8698 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8699
8700 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8701 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8702 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8703 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8704 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8705 /* wait until link state is restored */
8706 bnx2x_wait_for_link(bp, link_up);
8707
8708 } else
8709 return -EINVAL;
8710
8711 pkt_size = 1514;
8712 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8713 if (!skb) {
8714 rc = -ENOMEM;
8715 goto test_loopback_exit;
8716 }
8717 packet = skb_put(skb, pkt_size);
8718 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8719 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8720 for (i = ETH_HLEN; i < pkt_size; i++)
8721 packet[i] = (unsigned char) (i & 0xff);
8722
8723 num_pkts = 0;
8724 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8725 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8726
8727 pkt_prod = fp->tx_pkt_prod++;
8728 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8729 tx_buf->first_bd = fp->tx_bd_prod;
8730 tx_buf->skb = skb;
8731
8732 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8733 mapping = pci_map_single(bp->pdev, skb->data,
8734 skb_headlen(skb), PCI_DMA_TODEVICE);
8735 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8736 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8737 tx_bd->nbd = cpu_to_le16(1);
8738 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8739 tx_bd->vlan = cpu_to_le16(pkt_prod);
8740 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8741 ETH_TX_BD_FLAGS_END_BD);
8742 tx_bd->general_data = ((UNICAST_ADDRESS <<
8743 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8744
58f4c4cf
EG
8745 wmb();
8746
f3c87cdd
YG
8747 fp->hw_tx_prods->bds_prod =
8748 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8749 mb(); /* FW restriction: must not reorder writing nbd and packets */
8750 fp->hw_tx_prods->packets_prod =
8751 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8752 DOORBELL(bp, FP_IDX(fp), 0);
8753
8754 mmiowb();
8755
8756 num_pkts++;
8757 fp->tx_bd_prod++;
8758 bp->dev->trans_start = jiffies;
8759
8760 udelay(100);
8761
8762 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8763 if (tx_idx != tx_start_idx + num_pkts)
8764 goto test_loopback_exit;
8765
8766 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8767 if (rx_idx != rx_start_idx + num_pkts)
8768 goto test_loopback_exit;
8769
8770 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8771 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8772 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8773 goto test_loopback_rx_exit;
8774
8775 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8776 if (len != pkt_size)
8777 goto test_loopback_rx_exit;
8778
8779 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8780 skb = rx_buf->skb;
8781 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8782 for (i = ETH_HLEN; i < pkt_size; i++)
8783 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8784 goto test_loopback_rx_exit;
8785
8786 rc = 0;
8787
8788test_loopback_rx_exit:
f3c87cdd
YG
8789
8790 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8791 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8792 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8793 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8794
8795 /* Update producers */
8796 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8797 fp->rx_sge_prod);
f3c87cdd
YG
8798
8799test_loopback_exit:
8800 bp->link_params.loopback_mode = LOOPBACK_NONE;
8801
8802 return rc;
8803}
8804
8805static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8806{
8807 int rc = 0;
8808
8809 if (!netif_running(bp->dev))
8810 return BNX2X_LOOPBACK_FAILED;
8811
f8ef6e44 8812 bnx2x_netif_stop(bp, 1);
f3c87cdd
YG
8813
8814 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8815 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8816 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8817 }
8818
8819 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8820 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8821 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8822 }
8823
8824 bnx2x_netif_start(bp);
8825
8826 return rc;
8827}
8828
8829#define CRC32_RESIDUAL 0xdebb20e3
8830
8831static int bnx2x_test_nvram(struct bnx2x *bp)
8832{
8833 static const struct {
8834 int offset;
8835 int size;
8836 } nvram_tbl[] = {
8837 { 0, 0x14 }, /* bootstrap */
8838 { 0x14, 0xec }, /* dir */
8839 { 0x100, 0x350 }, /* manuf_info */
8840 { 0x450, 0xf0 }, /* feature_info */
8841 { 0x640, 0x64 }, /* upgrade_key_info */
8842 { 0x6a4, 0x64 },
8843 { 0x708, 0x70 }, /* manuf_key_info */
8844 { 0x778, 0x70 },
8845 { 0, 0 }
8846 };
8847 u32 buf[0x350 / 4];
8848 u8 *data = (u8 *)buf;
8849 int i, rc;
8850 u32 magic, csum;
8851
8852 rc = bnx2x_nvram_read(bp, 0, data, 4);
8853 if (rc) {
8854 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8855 goto test_nvram_exit;
8856 }
8857
8858 magic = be32_to_cpu(buf[0]);
8859 if (magic != 0x669955aa) {
8860 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8861 rc = -ENODEV;
8862 goto test_nvram_exit;
8863 }
8864
8865 for (i = 0; nvram_tbl[i].size; i++) {
8866
8867 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8868 nvram_tbl[i].size);
8869 if (rc) {
8870 DP(NETIF_MSG_PROBE,
8871 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8872 goto test_nvram_exit;
8873 }
8874
8875 csum = ether_crc_le(nvram_tbl[i].size, data);
8876 if (csum != CRC32_RESIDUAL) {
8877 DP(NETIF_MSG_PROBE,
8878 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8879 rc = -ENODEV;
8880 goto test_nvram_exit;
8881 }
8882 }
8883
8884test_nvram_exit:
8885 return rc;
8886}
8887
8888static int bnx2x_test_intr(struct bnx2x *bp)
8889{
8890 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8891 int i, rc;
8892
8893 if (!netif_running(bp->dev))
8894 return -ENODEV;
8895
8896 config->hdr.length_6b = 0;
8897 config->hdr.offset = 0;
8898 config->hdr.client_id = BP_CL_ID(bp);
8899 config->hdr.reserved1 = 0;
8900
8901 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8902 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8903 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8904 if (rc == 0) {
8905 bp->set_mac_pending++;
8906 for (i = 0; i < 10; i++) {
8907 if (!bp->set_mac_pending)
8908 break;
8909 msleep_interruptible(10);
8910 }
8911 if (i == 10)
8912 rc = -ENODEV;
8913 }
8914
8915 return rc;
8916}
8917
a2fbb9ea
ET
8918static void bnx2x_self_test(struct net_device *dev,
8919 struct ethtool_test *etest, u64 *buf)
8920{
8921 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8922
8923 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8924
f3c87cdd 8925 if (!netif_running(dev))
a2fbb9ea 8926 return;
a2fbb9ea 8927
33471629 8928 /* offline tests are not supported in MF mode */
f3c87cdd
YG
8929 if (IS_E1HMF(bp))
8930 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8931
8932 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8933 u8 link_up;
8934
8935 link_up = bp->link_vars.link_up;
8936 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8937 bnx2x_nic_load(bp, LOAD_DIAG);
8938 /* wait until link state is restored */
8939 bnx2x_wait_for_link(bp, link_up);
8940
8941 if (bnx2x_test_registers(bp) != 0) {
8942 buf[0] = 1;
8943 etest->flags |= ETH_TEST_FL_FAILED;
8944 }
8945 if (bnx2x_test_memory(bp) != 0) {
8946 buf[1] = 1;
8947 etest->flags |= ETH_TEST_FL_FAILED;
8948 }
8949 buf[2] = bnx2x_test_loopback(bp, link_up);
8950 if (buf[2] != 0)
8951 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8952
f3c87cdd
YG
8953 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8954 bnx2x_nic_load(bp, LOAD_NORMAL);
8955 /* wait until link state is restored */
8956 bnx2x_wait_for_link(bp, link_up);
8957 }
8958 if (bnx2x_test_nvram(bp) != 0) {
8959 buf[3] = 1;
a2fbb9ea
ET
8960 etest->flags |= ETH_TEST_FL_FAILED;
8961 }
f3c87cdd
YG
8962 if (bnx2x_test_intr(bp) != 0) {
8963 buf[4] = 1;
8964 etest->flags |= ETH_TEST_FL_FAILED;
8965 }
8966 if (bp->port.pmf)
8967 if (bnx2x_link_test(bp) != 0) {
8968 buf[5] = 1;
8969 etest->flags |= ETH_TEST_FL_FAILED;
8970 }
8971 buf[7] = bnx2x_mc_assert(bp);
8972 if (buf[7] != 0)
8973 etest->flags |= ETH_TEST_FL_FAILED;
8974
8975#ifdef BNX2X_EXTRA_DEBUG
8976 bnx2x_panic_dump(bp);
8977#endif
a2fbb9ea
ET
8978}
8979
bb2a0f7a
YG
8980static const struct {
8981 long offset;
8982 int size;
8983 u32 flags;
66e855f3
YG
8984#define STATS_FLAGS_PORT 1
8985#define STATS_FLAGS_FUNC 2
8986 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8987} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
66e855f3
YG
8988/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8989 8, STATS_FLAGS_FUNC, "rx_bytes" },
8990 { STATS_OFFSET32(error_bytes_received_hi),
8991 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8992 { STATS_OFFSET32(total_bytes_transmitted_hi),
8993 8, STATS_FLAGS_FUNC, "tx_bytes" },
8994 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8995 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8996 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8997 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8998 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8999 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 9000 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9001 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9002 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9003 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9004 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9005 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9006/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9007 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9008 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9009 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9010 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9011 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9012 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9013 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9014 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9015 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9016 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9017 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9018 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9019 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9020 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9021 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9022 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9023 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9024 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
66e855f3
YG
9025 8, STATS_FLAGS_PORT, "rx_fragments" },
9026/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9027 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9028 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9029 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9030 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9031 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9032 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9033 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9034 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9035 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9036 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9037 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9038 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9039 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9040 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9041 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9042 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9043 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9044 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9045 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9046/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9047 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9048 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
66e855f3
YG
9049 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9050 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9051 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9052 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9053 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9054 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
66e855f3
YG
9055 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9056 { STATS_OFFSET32(mac_filter_discard),
9057 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9058 { STATS_OFFSET32(no_buff_discard),
9059 4, STATS_FLAGS_FUNC, "rx_discards" },
9060 { STATS_OFFSET32(xxoverflow_discard),
9061 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9062 { STATS_OFFSET32(brb_drop_hi),
9063 8, STATS_FLAGS_PORT, "brb_discard" },
9064 { STATS_OFFSET32(brb_truncate_hi),
9065 8, STATS_FLAGS_PORT, "brb_truncate" },
9066/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9067 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9068 { STATS_OFFSET32(rx_skb_alloc_failed),
9069 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9070/* 42 */{ STATS_OFFSET32(hw_csum_err),
9071 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
a2fbb9ea
ET
9072};
9073
66e855f3
YG
9074#define IS_NOT_E1HMF_STAT(bp, i) \
9075 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9076
a2fbb9ea
ET
9077static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9078{
bb2a0f7a
YG
9079 struct bnx2x *bp = netdev_priv(dev);
9080 int i, j;
9081
a2fbb9ea
ET
9082 switch (stringset) {
9083 case ETH_SS_STATS:
bb2a0f7a 9084 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9085 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9086 continue;
9087 strcpy(buf + j*ETH_GSTRING_LEN,
9088 bnx2x_stats_arr[i].string);
9089 j++;
9090 }
a2fbb9ea
ET
9091 break;
9092
9093 case ETH_SS_TEST:
9094 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9095 break;
9096 }
9097}
9098
9099static int bnx2x_get_stats_count(struct net_device *dev)
9100{
bb2a0f7a
YG
9101 struct bnx2x *bp = netdev_priv(dev);
9102 int i, num_stats = 0;
9103
9104 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9105 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9106 continue;
9107 num_stats++;
9108 }
9109 return num_stats;
a2fbb9ea
ET
9110}
9111
9112static void bnx2x_get_ethtool_stats(struct net_device *dev,
9113 struct ethtool_stats *stats, u64 *buf)
9114{
9115 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
9116 u32 *hw_stats = (u32 *)&bp->eth_stats;
9117 int i, j;
a2fbb9ea 9118
bb2a0f7a 9119 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9120 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9121 continue;
bb2a0f7a
YG
9122
9123 if (bnx2x_stats_arr[i].size == 0) {
9124 /* skip this counter */
9125 buf[j] = 0;
9126 j++;
a2fbb9ea
ET
9127 continue;
9128 }
bb2a0f7a 9129 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9130 /* 4-byte counter */
bb2a0f7a
YG
9131 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9132 j++;
a2fbb9ea
ET
9133 continue;
9134 }
9135 /* 8-byte counter */
bb2a0f7a
YG
9136 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9137 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9138 j++;
a2fbb9ea
ET
9139 }
9140}
9141
9142static int bnx2x_phys_id(struct net_device *dev, u32 data)
9143{
9144 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9145 int port = BP_PORT(bp);
a2fbb9ea
ET
9146 int i;
9147
34f80b04
EG
9148 if (!netif_running(dev))
9149 return 0;
9150
9151 if (!bp->port.pmf)
9152 return 0;
9153
a2fbb9ea
ET
9154 if (data == 0)
9155 data = 2;
9156
9157 for (i = 0; i < (data * 2); i++) {
c18487ee 9158 if ((i % 2) == 0)
34f80b04 9159 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9160 bp->link_params.hw_led_mode,
9161 bp->link_params.chip_id);
9162 else
34f80b04 9163 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9164 bp->link_params.hw_led_mode,
9165 bp->link_params.chip_id);
9166
a2fbb9ea
ET
9167 msleep_interruptible(500);
9168 if (signal_pending(current))
9169 break;
9170 }
9171
c18487ee 9172 if (bp->link_vars.link_up)
34f80b04 9173 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9174 bp->link_vars.line_speed,
9175 bp->link_params.hw_led_mode,
9176 bp->link_params.chip_id);
a2fbb9ea
ET
9177
9178 return 0;
9179}
9180
9181static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9182 .get_settings = bnx2x_get_settings,
9183 .set_settings = bnx2x_set_settings,
9184 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9185 .get_wol = bnx2x_get_wol,
9186 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9187 .get_msglevel = bnx2x_get_msglevel,
9188 .set_msglevel = bnx2x_set_msglevel,
9189 .nway_reset = bnx2x_nway_reset,
9190 .get_link = ethtool_op_get_link,
9191 .get_eeprom_len = bnx2x_get_eeprom_len,
9192 .get_eeprom = bnx2x_get_eeprom,
9193 .set_eeprom = bnx2x_set_eeprom,
9194 .get_coalesce = bnx2x_get_coalesce,
9195 .set_coalesce = bnx2x_set_coalesce,
9196 .get_ringparam = bnx2x_get_ringparam,
9197 .set_ringparam = bnx2x_set_ringparam,
9198 .get_pauseparam = bnx2x_get_pauseparam,
9199 .set_pauseparam = bnx2x_set_pauseparam,
9200 .get_rx_csum = bnx2x_get_rx_csum,
9201 .set_rx_csum = bnx2x_set_rx_csum,
9202 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9203 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9204 .set_flags = bnx2x_set_flags,
9205 .get_flags = ethtool_op_get_flags,
9206 .get_sg = ethtool_op_get_sg,
9207 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9208 .get_tso = ethtool_op_get_tso,
9209 .set_tso = bnx2x_set_tso,
9210 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9211 .self_test = bnx2x_self_test,
9212 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9213 .phys_id = bnx2x_phys_id,
9214 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9215 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9216};
9217
9218/* end of ethtool_ops */
9219
9220/****************************************************************************
9221* General service functions
9222****************************************************************************/
9223
9224static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9225{
9226 u16 pmcsr;
9227
9228 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9229
9230 switch (state) {
9231 case PCI_D0:
34f80b04 9232 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9233 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9234 PCI_PM_CTRL_PME_STATUS));
9235
9236 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9237 /* delay required during transition out of D3hot */
a2fbb9ea 9238 msleep(20);
34f80b04 9239 break;
a2fbb9ea 9240
34f80b04
EG
9241 case PCI_D3hot:
9242 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9243 pmcsr |= 3;
a2fbb9ea 9244
34f80b04
EG
9245 if (bp->wol)
9246 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9247
34f80b04
EG
9248 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9249 pmcsr);
a2fbb9ea 9250
34f80b04
EG
9251 /* No more memory access after this point until
9252 * device is brought back to D0.
9253 */
9254 break;
9255
9256 default:
9257 return -EINVAL;
9258 }
9259 return 0;
a2fbb9ea
ET
9260}
9261
34f80b04
EG
9262/*
9263 * net_device service functions
9264 */
9265
a2fbb9ea
ET
9266static int bnx2x_poll(struct napi_struct *napi, int budget)
9267{
9268 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9269 napi);
9270 struct bnx2x *bp = fp->bp;
9271 int work_done = 0;
2772f903 9272 u16 rx_cons_sb;
a2fbb9ea
ET
9273
9274#ifdef BNX2X_STOP_ON_ERROR
9275 if (unlikely(bp->panic))
34f80b04 9276 goto poll_panic;
a2fbb9ea
ET
9277#endif
9278
9279 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9280 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9281 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9282
9283 bnx2x_update_fpsb_idx(fp);
9284
da5a662a 9285 if (BNX2X_HAS_TX_WORK(fp))
a2fbb9ea
ET
9286 bnx2x_tx_int(fp, budget);
9287
2772f903
EG
9288 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9289 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9290 rx_cons_sb++;
da5a662a 9291 if (BNX2X_HAS_RX_WORK(fp))
a2fbb9ea
ET
9292 work_done = bnx2x_rx_int(fp, budget);
9293
da5a662a 9294 rmb(); /* BNX2X_HAS_WORK() reads the status block */
2772f903
EG
9295 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9296 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9297 rx_cons_sb++;
a2fbb9ea
ET
9298
9299 /* must not complete if we consumed full budget */
da5a662a 9300 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
9301
9302#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9303poll_panic:
a2fbb9ea 9304#endif
908a7a16 9305 netif_rx_complete(napi);
a2fbb9ea 9306
34f80b04 9307 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9308 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9309 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9310 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9311 }
a2fbb9ea
ET
9312 return work_done;
9313}
9314
755735eb
EG
9315
9316/* we split the first BD into headers and data BDs
33471629 9317 * to ease the pain of our fellow microcode engineers
755735eb
EG
9318 * we use one mapping for both BDs
9319 * So far this has only been observed to happen
9320 * in Other Operating Systems(TM)
9321 */
9322static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9323 struct bnx2x_fastpath *fp,
9324 struct eth_tx_bd **tx_bd, u16 hlen,
9325 u16 bd_prod, int nbd)
9326{
9327 struct eth_tx_bd *h_tx_bd = *tx_bd;
9328 struct eth_tx_bd *d_tx_bd;
9329 dma_addr_t mapping;
9330 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9331
9332 /* first fix first BD */
9333 h_tx_bd->nbd = cpu_to_le16(nbd);
9334 h_tx_bd->nbytes = cpu_to_le16(hlen);
9335
9336 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9337 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9338 h_tx_bd->addr_lo, h_tx_bd->nbd);
9339
9340 /* now get a new data BD
9341 * (after the pbd) and fill it */
9342 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9343 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9344
9345 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9346 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9347
9348 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9349 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9350 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9351 d_tx_bd->vlan = 0;
9352 /* this marks the BD as one that has no individual mapping
9353 * the FW ignores this flag in a BD not marked start
9354 */
9355 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9356 DP(NETIF_MSG_TX_QUEUED,
9357 "TSO split data size is %d (%x:%x)\n",
9358 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9359
9360 /* update tx_bd for marking the last BD flag */
9361 *tx_bd = d_tx_bd;
9362
9363 return bd_prod;
9364}
9365
9366static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9367{
9368 if (fix > 0)
9369 csum = (u16) ~csum_fold(csum_sub(csum,
9370 csum_partial(t_header - fix, fix, 0)));
9371
9372 else if (fix < 0)
9373 csum = (u16) ~csum_fold(csum_add(csum,
9374 csum_partial(t_header, -fix, 0)));
9375
9376 return swab16(csum);
9377}
9378
9379static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9380{
9381 u32 rc;
9382
9383 if (skb->ip_summed != CHECKSUM_PARTIAL)
9384 rc = XMIT_PLAIN;
9385
9386 else {
9387 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9388 rc = XMIT_CSUM_V6;
9389 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9390 rc |= XMIT_CSUM_TCP;
9391
9392 } else {
9393 rc = XMIT_CSUM_V4;
9394 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9395 rc |= XMIT_CSUM_TCP;
9396 }
9397 }
9398
9399 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9400 rc |= XMIT_GSO_V4;
9401
9402 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9403 rc |= XMIT_GSO_V6;
9404
9405 return rc;
9406}
9407
9408/* check if packet requires linearization (packet is too fragmented) */
9409static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9410 u32 xmit_type)
9411{
9412 int to_copy = 0;
9413 int hlen = 0;
9414 int first_bd_sz = 0;
9415
9416 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9417 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9418
9419 if (xmit_type & XMIT_GSO) {
9420 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9421 /* Check if LSO packet needs to be copied:
9422 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9423 int wnd_size = MAX_FETCH_BD - 3;
33471629 9424 /* Number of windows to check */
755735eb
EG
9425 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9426 int wnd_idx = 0;
9427 int frag_idx = 0;
9428 u32 wnd_sum = 0;
9429
9430 /* Headers length */
9431 hlen = (int)(skb_transport_header(skb) - skb->data) +
9432 tcp_hdrlen(skb);
9433
9434 /* Amount of data (w/o headers) on linear part of SKB*/
9435 first_bd_sz = skb_headlen(skb) - hlen;
9436
9437 wnd_sum = first_bd_sz;
9438
9439 /* Calculate the first sum - it's special */
9440 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9441 wnd_sum +=
9442 skb_shinfo(skb)->frags[frag_idx].size;
9443
9444 /* If there was data on linear skb data - check it */
9445 if (first_bd_sz > 0) {
9446 if (unlikely(wnd_sum < lso_mss)) {
9447 to_copy = 1;
9448 goto exit_lbl;
9449 }
9450
9451 wnd_sum -= first_bd_sz;
9452 }
9453
9454 /* Others are easier: run through the frag list and
9455 check all windows */
9456 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9457 wnd_sum +=
9458 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9459
9460 if (unlikely(wnd_sum < lso_mss)) {
9461 to_copy = 1;
9462 break;
9463 }
9464 wnd_sum -=
9465 skb_shinfo(skb)->frags[wnd_idx].size;
9466 }
9467
9468 } else {
9469 /* in non-LSO too fragmented packet should always
9470 be linearized */
9471 to_copy = 1;
9472 }
9473 }
9474
9475exit_lbl:
9476 if (unlikely(to_copy))
9477 DP(NETIF_MSG_TX_QUEUED,
9478 "Linearization IS REQUIRED for %s packet. "
9479 "num_frags %d hlen %d first_bd_sz %d\n",
9480 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9481 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9482
9483 return to_copy;
9484}
9485
9486/* called with netif_tx_lock
a2fbb9ea 9487 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9488 * netif_wake_queue()
a2fbb9ea
ET
9489 */
9490static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9491{
9492 struct bnx2x *bp = netdev_priv(dev);
9493 struct bnx2x_fastpath *fp;
9494 struct sw_tx_bd *tx_buf;
9495 struct eth_tx_bd *tx_bd;
9496 struct eth_tx_parse_bd *pbd = NULL;
9497 u16 pkt_prod, bd_prod;
755735eb 9498 int nbd, fp_index;
a2fbb9ea 9499 dma_addr_t mapping;
755735eb
EG
9500 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9501 int vlan_off = (bp->e1hov ? 4 : 0);
9502 int i;
9503 u8 hlen = 0;
a2fbb9ea
ET
9504
9505#ifdef BNX2X_STOP_ON_ERROR
9506 if (unlikely(bp->panic))
9507 return NETDEV_TX_BUSY;
9508#endif
9509
755735eb 9510 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9511 fp = &bp->fp[fp_index];
755735eb 9512
231fd58a 9513 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9514 bp->eth_stats.driver_xoff++,
a2fbb9ea
ET
9515 netif_stop_queue(dev);
9516 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9517 return NETDEV_TX_BUSY;
9518 }
9519
755735eb
EG
9520 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9521 " gso type %x xmit_type %x\n",
9522 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9523 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9524
33471629 9525 /* First, check if we need to linearize the skb
755735eb
EG
9526 (due to FW restrictions) */
9527 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9528 /* Statistics of linearization */
9529 bp->lin_cnt++;
9530 if (skb_linearize(skb) != 0) {
9531 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9532 "silently dropping this SKB\n");
9533 dev_kfree_skb_any(skb);
da5a662a 9534 return NETDEV_TX_OK;
755735eb
EG
9535 }
9536 }
9537
a2fbb9ea 9538 /*
755735eb 9539 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9540 then for TSO or xsum we have a parsing info BD,
755735eb 9541 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9542 (don't forget to mark the last one as last,
9543 and to unmap only AFTER you write to the BD ...)
755735eb 9544 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9545 */
9546
9547 pkt_prod = fp->tx_pkt_prod++;
755735eb 9548 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9549
755735eb 9550 /* get a tx_buf and first BD */
a2fbb9ea
ET
9551 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9552 tx_bd = &fp->tx_desc_ring[bd_prod];
9553
9554 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9555 tx_bd->general_data = (UNICAST_ADDRESS <<
9556 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
9557 /* header nbd */
9558 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9559
755735eb
EG
9560 /* remember the first BD of the packet */
9561 tx_buf->first_bd = fp->tx_bd_prod;
9562 tx_buf->skb = skb;
a2fbb9ea
ET
9563
9564 DP(NETIF_MSG_TX_QUEUED,
9565 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9566 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9567
755735eb
EG
9568 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9569 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9570 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9571 vlan_off += 4;
9572 } else
9573 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9574
755735eb 9575 if (xmit_type) {
755735eb 9576 /* turn on parsing and get a BD */
a2fbb9ea
ET
9577 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9578 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9579
9580 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9581 }
9582
9583 if (xmit_type & XMIT_CSUM) {
9584 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9585
9586 /* for now NS flag is not used in Linux */
755735eb 9587 pbd->global_data = (hlen |
96fc1784 9588 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9589 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9590
755735eb
EG
9591 pbd->ip_hlen = (skb_transport_header(skb) -
9592 skb_network_header(skb)) / 2;
9593
9594 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9595
755735eb
EG
9596 pbd->total_hlen = cpu_to_le16(hlen);
9597 hlen = hlen*2 - vlan_off;
a2fbb9ea 9598
755735eb
EG
9599 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9600
9601 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9602 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9603 ETH_TX_BD_FLAGS_IP_CSUM;
9604 else
9605 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9606
9607 if (xmit_type & XMIT_CSUM_TCP) {
9608 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9609
9610 } else {
9611 s8 fix = SKB_CS_OFF(skb); /* signed! */
9612
a2fbb9ea 9613 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9614 pbd->cs_offset = fix / 2;
a2fbb9ea 9615
755735eb
EG
9616 DP(NETIF_MSG_TX_QUEUED,
9617 "hlen %d offset %d fix %d csum before fix %x\n",
9618 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9619 SKB_CS(skb));
9620
9621 /* HW bug: fixup the CSUM */
9622 pbd->tcp_pseudo_csum =
9623 bnx2x_csum_fix(skb_transport_header(skb),
9624 SKB_CS(skb), fix);
9625
9626 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9627 pbd->tcp_pseudo_csum);
9628 }
a2fbb9ea
ET
9629 }
9630
9631 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9632 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9633
9634 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9635 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9636 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
9637 tx_bd->nbd = cpu_to_le16(nbd);
9638 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9639
9640 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9641 " nbytes %d flags %x vlan %x\n",
9642 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9643 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9644 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9645
755735eb 9646 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9647
9648 DP(NETIF_MSG_TX_QUEUED,
9649 "TSO packet len %d hlen %d total len %d tso size %d\n",
9650 skb->len, hlen, skb_headlen(skb),
9651 skb_shinfo(skb)->gso_size);
9652
9653 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9654
755735eb
EG
9655 if (unlikely(skb_headlen(skb) > hlen))
9656 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9657 bd_prod, ++nbd);
a2fbb9ea
ET
9658
9659 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9660 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9661 pbd->tcp_flags = pbd_tcp_flags(skb);
9662
9663 if (xmit_type & XMIT_GSO_V4) {
9664 pbd->ip_id = swab16(ip_hdr(skb)->id);
9665 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9666 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9667 ip_hdr(skb)->daddr,
9668 0, IPPROTO_TCP, 0));
755735eb
EG
9669
9670 } else
9671 pbd->tcp_pseudo_csum =
9672 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9673 &ipv6_hdr(skb)->daddr,
9674 0, IPPROTO_TCP, 0));
9675
a2fbb9ea
ET
9676 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9677 }
9678
755735eb
EG
9679 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9680 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9681
755735eb
EG
9682 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9683 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9684
755735eb
EG
9685 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9686 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9687
755735eb
EG
9688 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9689 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9690 tx_bd->nbytes = cpu_to_le16(frag->size);
9691 tx_bd->vlan = cpu_to_le16(pkt_prod);
9692 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9693
755735eb
EG
9694 DP(NETIF_MSG_TX_QUEUED,
9695 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9696 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9697 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9698 }
9699
755735eb 9700 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9701 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9702
9703 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9704 tx_bd, tx_bd->bd_flags.as_bitfield);
9705
a2fbb9ea
ET
9706 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9707
755735eb 9708 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9709 * if the packet contains or ends with it
9710 */
9711 if (TX_BD_POFF(bd_prod) < nbd)
9712 nbd++;
9713
9714 if (pbd)
9715 DP(NETIF_MSG_TX_QUEUED,
9716 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9717 " tcp_flags %x xsum %x seq %u hlen %u\n",
9718 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9719 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9720 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9721
755735eb 9722 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9723
58f4c4cf
EG
9724 /*
9725 * Make sure that the BD data is updated before updating the producer
9726 * since FW might read the BD right after the producer is updated.
9727 * This is only applicable for weak-ordered memory model archs such
9728 * as IA-64. The following barrier is also mandatory since FW will
9729 * assumes packets must have BDs.
9730 */
9731 wmb();
9732
96fc1784
ET
9733 fp->hw_tx_prods->bds_prod =
9734 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9735 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9736 fp->hw_tx_prods->packets_prod =
9737 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9738 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9739
9740 mmiowb();
9741
755735eb 9742 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9743 dev->trans_start = jiffies;
9744
9745 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
9746 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9747 if we put Tx into XOFF state. */
9748 smp_mb();
a2fbb9ea 9749 netif_stop_queue(dev);
bb2a0f7a 9750 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9751 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9752 netif_wake_queue(dev);
9753 }
9754 fp->tx_pkt++;
9755
9756 return NETDEV_TX_OK;
9757}
9758
bb2a0f7a 9759/* called with rtnl_lock */
a2fbb9ea
ET
9760static int bnx2x_open(struct net_device *dev)
9761{
9762 struct bnx2x *bp = netdev_priv(dev);
9763
9764 bnx2x_set_power_state(bp, PCI_D0);
9765
bb2a0f7a 9766 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9767}
9768
bb2a0f7a 9769/* called with rtnl_lock */
a2fbb9ea
ET
9770static int bnx2x_close(struct net_device *dev)
9771{
a2fbb9ea
ET
9772 struct bnx2x *bp = netdev_priv(dev);
9773
9774 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9775 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9776 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9777 if (!CHIP_REV_IS_SLOW(bp))
9778 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9779
9780 return 0;
9781}
9782
34f80b04
EG
9783/* called with netif_tx_lock from set_multicast */
9784static void bnx2x_set_rx_mode(struct net_device *dev)
9785{
9786 struct bnx2x *bp = netdev_priv(dev);
9787 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9788 int port = BP_PORT(bp);
9789
9790 if (bp->state != BNX2X_STATE_OPEN) {
9791 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9792 return;
9793 }
9794
9795 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9796
9797 if (dev->flags & IFF_PROMISC)
9798 rx_mode = BNX2X_RX_MODE_PROMISC;
9799
9800 else if ((dev->flags & IFF_ALLMULTI) ||
9801 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9802 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9803
9804 else { /* some multicasts */
9805 if (CHIP_IS_E1(bp)) {
9806 int i, old, offset;
9807 struct dev_mc_list *mclist;
9808 struct mac_configuration_cmd *config =
9809 bnx2x_sp(bp, mcast_config);
9810
9811 for (i = 0, mclist = dev->mc_list;
9812 mclist && (i < dev->mc_count);
9813 i++, mclist = mclist->next) {
9814
9815 config->config_table[i].
9816 cam_entry.msb_mac_addr =
9817 swab16(*(u16 *)&mclist->dmi_addr[0]);
9818 config->config_table[i].
9819 cam_entry.middle_mac_addr =
9820 swab16(*(u16 *)&mclist->dmi_addr[2]);
9821 config->config_table[i].
9822 cam_entry.lsb_mac_addr =
9823 swab16(*(u16 *)&mclist->dmi_addr[4]);
9824 config->config_table[i].cam_entry.flags =
9825 cpu_to_le16(port);
9826 config->config_table[i].
9827 target_table_entry.flags = 0;
9828 config->config_table[i].
9829 target_table_entry.client_id = 0;
9830 config->config_table[i].
9831 target_table_entry.vlan_id = 0;
9832
9833 DP(NETIF_MSG_IFUP,
9834 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9835 config->config_table[i].
9836 cam_entry.msb_mac_addr,
9837 config->config_table[i].
9838 cam_entry.middle_mac_addr,
9839 config->config_table[i].
9840 cam_entry.lsb_mac_addr);
9841 }
9842 old = config->hdr.length_6b;
9843 if (old > i) {
9844 for (; i < old; i++) {
9845 if (CAM_IS_INVALID(config->
9846 config_table[i])) {
9847 i--; /* already invalidated */
9848 break;
9849 }
9850 /* invalidate */
9851 CAM_INVALIDATE(config->
9852 config_table[i]);
9853 }
9854 }
9855
9856 if (CHIP_REV_IS_SLOW(bp))
9857 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9858 else
9859 offset = BNX2X_MAX_MULTICAST*(1 + port);
9860
9861 config->hdr.length_6b = i;
9862 config->hdr.offset = offset;
9863 config->hdr.client_id = BP_CL_ID(bp);
9864 config->hdr.reserved1 = 0;
9865
9866 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9867 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9868 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9869 0);
9870 } else { /* E1H */
9871 /* Accept one or more multicasts */
9872 struct dev_mc_list *mclist;
9873 u32 mc_filter[MC_HASH_SIZE];
9874 u32 crc, bit, regidx;
9875 int i;
9876
9877 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9878
9879 for (i = 0, mclist = dev->mc_list;
9880 mclist && (i < dev->mc_count);
9881 i++, mclist = mclist->next) {
9882
7c510e4b
JB
9883 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9884 mclist->dmi_addr);
34f80b04
EG
9885
9886 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9887 bit = (crc >> 24) & 0xff;
9888 regidx = bit >> 5;
9889 bit &= 0x1f;
9890 mc_filter[regidx] |= (1 << bit);
9891 }
9892
9893 for (i = 0; i < MC_HASH_SIZE; i++)
9894 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9895 mc_filter[i]);
9896 }
9897 }
9898
9899 bp->rx_mode = rx_mode;
9900 bnx2x_set_storm_rx_mode(bp);
9901}
9902
9903/* called with rtnl_lock */
a2fbb9ea
ET
9904static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9905{
9906 struct sockaddr *addr = p;
9907 struct bnx2x *bp = netdev_priv(dev);
9908
34f80b04 9909 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9910 return -EINVAL;
9911
9912 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9913 if (netif_running(dev)) {
9914 if (CHIP_IS_E1(bp))
3101c2bc 9915 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9916 else
3101c2bc 9917 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9918 }
a2fbb9ea
ET
9919
9920 return 0;
9921}
9922
c18487ee 9923/* called with rtnl_lock */
a2fbb9ea
ET
9924static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9925{
9926 struct mii_ioctl_data *data = if_mii(ifr);
9927 struct bnx2x *bp = netdev_priv(dev);
3196a88a 9928 int port = BP_PORT(bp);
a2fbb9ea
ET
9929 int err;
9930
9931 switch (cmd) {
9932 case SIOCGMIIPHY:
34f80b04 9933 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9934
c14423fe 9935 /* fallthrough */
c18487ee 9936
a2fbb9ea 9937 case SIOCGMIIREG: {
c18487ee 9938 u16 mii_regval;
a2fbb9ea 9939
c18487ee
YR
9940 if (!netif_running(dev))
9941 return -EAGAIN;
a2fbb9ea 9942
34f80b04 9943 mutex_lock(&bp->port.phy_mutex);
3196a88a 9944 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
9945 DEFAULT_PHY_DEV_ADDR,
9946 (data->reg_num & 0x1f), &mii_regval);
9947 data->val_out = mii_regval;
34f80b04 9948 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9949 return err;
9950 }
9951
9952 case SIOCSMIIREG:
9953 if (!capable(CAP_NET_ADMIN))
9954 return -EPERM;
9955
c18487ee
YR
9956 if (!netif_running(dev))
9957 return -EAGAIN;
9958
34f80b04 9959 mutex_lock(&bp->port.phy_mutex);
3196a88a 9960 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
9961 DEFAULT_PHY_DEV_ADDR,
9962 (data->reg_num & 0x1f), data->val_in);
34f80b04 9963 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9964 return err;
9965
9966 default:
9967 /* do nothing */
9968 break;
9969 }
9970
9971 return -EOPNOTSUPP;
9972}
9973
34f80b04 9974/* called with rtnl_lock */
a2fbb9ea
ET
9975static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9976{
9977 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9978 int rc = 0;
a2fbb9ea
ET
9979
9980 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9981 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9982 return -EINVAL;
9983
9984 /* This does not race with packet allocation
c14423fe 9985 * because the actual alloc size is
a2fbb9ea
ET
9986 * only updated as part of load
9987 */
9988 dev->mtu = new_mtu;
9989
9990 if (netif_running(dev)) {
34f80b04
EG
9991 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9992 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9993 }
34f80b04
EG
9994
9995 return rc;
a2fbb9ea
ET
9996}
9997
9998static void bnx2x_tx_timeout(struct net_device *dev)
9999{
10000 struct bnx2x *bp = netdev_priv(dev);
10001
10002#ifdef BNX2X_STOP_ON_ERROR
10003 if (!bp->panic)
10004 bnx2x_panic();
10005#endif
10006 /* This allows the netif to be shutdown gracefully before resetting */
10007 schedule_work(&bp->reset_task);
10008}
10009
10010#ifdef BCM_VLAN
34f80b04 10011/* called with rtnl_lock */
a2fbb9ea
ET
10012static void bnx2x_vlan_rx_register(struct net_device *dev,
10013 struct vlan_group *vlgrp)
10014{
10015 struct bnx2x *bp = netdev_priv(dev);
10016
10017 bp->vlgrp = vlgrp;
10018 if (netif_running(dev))
49d66772 10019 bnx2x_set_client_config(bp);
a2fbb9ea 10020}
34f80b04 10021
a2fbb9ea
ET
10022#endif
10023
10024#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10025static void poll_bnx2x(struct net_device *dev)
10026{
10027 struct bnx2x *bp = netdev_priv(dev);
10028
10029 disable_irq(bp->pdev->irq);
10030 bnx2x_interrupt(bp->pdev->irq, dev);
10031 enable_irq(bp->pdev->irq);
10032}
10033#endif
10034
c64213cd
SH
10035static const struct net_device_ops bnx2x_netdev_ops = {
10036 .ndo_open = bnx2x_open,
10037 .ndo_stop = bnx2x_close,
10038 .ndo_start_xmit = bnx2x_start_xmit,
10039 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10040 .ndo_set_mac_address = bnx2x_change_mac_addr,
10041 .ndo_validate_addr = eth_validate_addr,
10042 .ndo_do_ioctl = bnx2x_ioctl,
10043 .ndo_change_mtu = bnx2x_change_mtu,
10044 .ndo_tx_timeout = bnx2x_tx_timeout,
10045#ifdef BCM_VLAN
10046 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10047#endif
10048#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10049 .ndo_poll_controller = poll_bnx2x,
10050#endif
10051};
10052
10053
34f80b04
EG
10054static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10055 struct net_device *dev)
a2fbb9ea
ET
10056{
10057 struct bnx2x *bp;
10058 int rc;
10059
10060 SET_NETDEV_DEV(dev, &pdev->dev);
10061 bp = netdev_priv(dev);
10062
34f80b04
EG
10063 bp->dev = dev;
10064 bp->pdev = pdev;
a2fbb9ea 10065 bp->flags = 0;
34f80b04 10066 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10067
10068 rc = pci_enable_device(pdev);
10069 if (rc) {
10070 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10071 goto err_out;
10072 }
10073
10074 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10075 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10076 " aborting\n");
10077 rc = -ENODEV;
10078 goto err_out_disable;
10079 }
10080
10081 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10082 printk(KERN_ERR PFX "Cannot find second PCI device"
10083 " base address, aborting\n");
10084 rc = -ENODEV;
10085 goto err_out_disable;
10086 }
10087
34f80b04
EG
10088 if (atomic_read(&pdev->enable_cnt) == 1) {
10089 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10090 if (rc) {
10091 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10092 " aborting\n");
10093 goto err_out_disable;
10094 }
a2fbb9ea 10095
34f80b04
EG
10096 pci_set_master(pdev);
10097 pci_save_state(pdev);
10098 }
a2fbb9ea
ET
10099
10100 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10101 if (bp->pm_cap == 0) {
10102 printk(KERN_ERR PFX "Cannot find power management"
10103 " capability, aborting\n");
10104 rc = -EIO;
10105 goto err_out_release;
10106 }
10107
10108 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10109 if (bp->pcie_cap == 0) {
10110 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10111 " aborting\n");
10112 rc = -EIO;
10113 goto err_out_release;
10114 }
10115
10116 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10117 bp->flags |= USING_DAC_FLAG;
10118 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10119 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10120 " failed, aborting\n");
10121 rc = -EIO;
10122 goto err_out_release;
10123 }
10124
10125 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10126 printk(KERN_ERR PFX "System does not support DMA,"
10127 " aborting\n");
10128 rc = -EIO;
10129 goto err_out_release;
10130 }
10131
34f80b04
EG
10132 dev->mem_start = pci_resource_start(pdev, 0);
10133 dev->base_addr = dev->mem_start;
10134 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10135
10136 dev->irq = pdev->irq;
10137
275f165f 10138 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10139 if (!bp->regview) {
10140 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10141 rc = -ENOMEM;
10142 goto err_out_release;
10143 }
10144
34f80b04
EG
10145 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10146 min_t(u64, BNX2X_DB_SIZE,
10147 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10148 if (!bp->doorbells) {
10149 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10150 rc = -ENOMEM;
10151 goto err_out_unmap;
10152 }
10153
10154 bnx2x_set_power_state(bp, PCI_D0);
10155
34f80b04
EG
10156 /* clean indirect addresses */
10157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10158 PCICFG_VENDOR_ID_OFFSET);
10159 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10160 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10161 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10162 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10163
34f80b04 10164 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10165
c64213cd 10166 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10167 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10168 dev->features |= NETIF_F_SG;
10169 dev->features |= NETIF_F_HW_CSUM;
10170 if (bp->flags & USING_DAC_FLAG)
10171 dev->features |= NETIF_F_HIGHDMA;
10172#ifdef BCM_VLAN
10173 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10174#endif
10175 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10176 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10177
10178 return 0;
10179
10180err_out_unmap:
10181 if (bp->regview) {
10182 iounmap(bp->regview);
10183 bp->regview = NULL;
10184 }
a2fbb9ea
ET
10185 if (bp->doorbells) {
10186 iounmap(bp->doorbells);
10187 bp->doorbells = NULL;
10188 }
10189
10190err_out_release:
34f80b04
EG
10191 if (atomic_read(&pdev->enable_cnt) == 1)
10192 pci_release_regions(pdev);
a2fbb9ea
ET
10193
10194err_out_disable:
10195 pci_disable_device(pdev);
10196 pci_set_drvdata(pdev, NULL);
10197
10198err_out:
10199 return rc;
10200}
10201
25047950
ET
10202static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10203{
10204 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10205
10206 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10207 return val;
10208}
10209
10210/* return value of 1=2.5GHz 2=5GHz */
10211static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10212{
10213 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10214
10215 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10216 return val;
10217}
10218
a2fbb9ea
ET
10219static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10220 const struct pci_device_id *ent)
10221{
10222 static int version_printed;
10223 struct net_device *dev = NULL;
10224 struct bnx2x *bp;
25047950 10225 int rc;
a2fbb9ea
ET
10226
10227 if (version_printed++ == 0)
10228 printk(KERN_INFO "%s", version);
10229
10230 /* dev zeroed in init_etherdev */
10231 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10232 if (!dev) {
10233 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10234 return -ENOMEM;
34f80b04 10235 }
a2fbb9ea 10236
a2fbb9ea
ET
10237 bp = netdev_priv(dev);
10238 bp->msglevel = debug;
10239
34f80b04 10240 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10241 if (rc < 0) {
10242 free_netdev(dev);
10243 return rc;
10244 }
10245
a2fbb9ea
ET
10246 rc = register_netdev(dev);
10247 if (rc) {
c14423fe 10248 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10249 goto init_one_exit;
a2fbb9ea
ET
10250 }
10251
10252 pci_set_drvdata(pdev, dev);
10253
34f80b04
EG
10254 rc = bnx2x_init_bp(bp);
10255 if (rc) {
10256 unregister_netdev(dev);
10257 goto init_one_exit;
10258 }
10259
12b56ea8
EG
10260 netif_carrier_off(dev);
10261
34f80b04 10262 bp->common.name = board_info[ent->driver_data].name;
25047950 10263 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10264 " IRQ %d, ", dev->name, bp->common.name,
10265 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10266 bnx2x_get_pcie_width(bp),
10267 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10268 dev->base_addr, bp->pdev->irq);
e174961c 10269 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10270 return 0;
34f80b04
EG
10271
10272init_one_exit:
10273 if (bp->regview)
10274 iounmap(bp->regview);
10275
10276 if (bp->doorbells)
10277 iounmap(bp->doorbells);
10278
10279 free_netdev(dev);
10280
10281 if (atomic_read(&pdev->enable_cnt) == 1)
10282 pci_release_regions(pdev);
10283
10284 pci_disable_device(pdev);
10285 pci_set_drvdata(pdev, NULL);
10286
10287 return rc;
a2fbb9ea
ET
10288}
10289
10290static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10291{
10292 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10293 struct bnx2x *bp;
10294
10295 if (!dev) {
228241eb
ET
10296 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10297 return;
10298 }
228241eb 10299 bp = netdev_priv(dev);
a2fbb9ea 10300
a2fbb9ea
ET
10301 unregister_netdev(dev);
10302
10303 if (bp->regview)
10304 iounmap(bp->regview);
10305
10306 if (bp->doorbells)
10307 iounmap(bp->doorbells);
10308
10309 free_netdev(dev);
34f80b04
EG
10310
10311 if (atomic_read(&pdev->enable_cnt) == 1)
10312 pci_release_regions(pdev);
10313
a2fbb9ea
ET
10314 pci_disable_device(pdev);
10315 pci_set_drvdata(pdev, NULL);
10316}
10317
10318static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10319{
10320 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10321 struct bnx2x *bp;
10322
34f80b04
EG
10323 if (!dev) {
10324 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10325 return -ENODEV;
10326 }
10327 bp = netdev_priv(dev);
a2fbb9ea 10328
34f80b04 10329 rtnl_lock();
a2fbb9ea 10330
34f80b04 10331 pci_save_state(pdev);
228241eb 10332
34f80b04
EG
10333 if (!netif_running(dev)) {
10334 rtnl_unlock();
10335 return 0;
10336 }
a2fbb9ea
ET
10337
10338 netif_device_detach(dev);
a2fbb9ea 10339
da5a662a 10340 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10341
a2fbb9ea 10342 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10343
34f80b04
EG
10344 rtnl_unlock();
10345
a2fbb9ea
ET
10346 return 0;
10347}
10348
10349static int bnx2x_resume(struct pci_dev *pdev)
10350{
10351 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10352 struct bnx2x *bp;
a2fbb9ea
ET
10353 int rc;
10354
228241eb
ET
10355 if (!dev) {
10356 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10357 return -ENODEV;
10358 }
228241eb 10359 bp = netdev_priv(dev);
a2fbb9ea 10360
34f80b04
EG
10361 rtnl_lock();
10362
228241eb 10363 pci_restore_state(pdev);
34f80b04
EG
10364
10365 if (!netif_running(dev)) {
10366 rtnl_unlock();
10367 return 0;
10368 }
10369
a2fbb9ea
ET
10370 bnx2x_set_power_state(bp, PCI_D0);
10371 netif_device_attach(dev);
10372
da5a662a 10373 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10374
34f80b04
EG
10375 rtnl_unlock();
10376
10377 return rc;
a2fbb9ea
ET
10378}
10379
f8ef6e44
YG
10380static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10381{
10382 int i;
10383
10384 bp->state = BNX2X_STATE_ERROR;
10385
10386 bp->rx_mode = BNX2X_RX_MODE_NONE;
10387
10388 bnx2x_netif_stop(bp, 0);
10389
10390 del_timer_sync(&bp->timer);
10391 bp->stats_state = STATS_STATE_DISABLED;
10392 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10393
10394 /* Release IRQs */
10395 bnx2x_free_irq(bp);
10396
10397 if (CHIP_IS_E1(bp)) {
10398 struct mac_configuration_cmd *config =
10399 bnx2x_sp(bp, mcast_config);
10400
10401 for (i = 0; i < config->hdr.length_6b; i++)
10402 CAM_INVALIDATE(config->config_table[i]);
10403 }
10404
10405 /* Free SKBs, SGEs, TPA pool and driver internals */
10406 bnx2x_free_skbs(bp);
10407 for_each_queue(bp, i)
10408 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10409 bnx2x_free_mem(bp);
10410
10411 bp->state = BNX2X_STATE_CLOSED;
10412
10413 netif_carrier_off(bp->dev);
10414
10415 return 0;
10416}
10417
10418static void bnx2x_eeh_recover(struct bnx2x *bp)
10419{
10420 u32 val;
10421
10422 mutex_init(&bp->port.phy_mutex);
10423
10424 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10425 bp->link_params.shmem_base = bp->common.shmem_base;
10426 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10427
10428 if (!bp->common.shmem_base ||
10429 (bp->common.shmem_base < 0xA0000) ||
10430 (bp->common.shmem_base >= 0xC0000)) {
10431 BNX2X_DEV_INFO("MCP not active\n");
10432 bp->flags |= NO_MCP_FLAG;
10433 return;
10434 }
10435
10436 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10437 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10438 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10439 BNX2X_ERR("BAD MCP validity signature\n");
10440
10441 if (!BP_NOMCP(bp)) {
10442 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10443 & DRV_MSG_SEQ_NUMBER_MASK);
10444 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10445 }
10446}
10447
493adb1f
WX
10448/**
10449 * bnx2x_io_error_detected - called when PCI error is detected
10450 * @pdev: Pointer to PCI device
10451 * @state: The current pci connection state
10452 *
10453 * This function is called after a PCI bus error affecting
10454 * this device has been detected.
10455 */
10456static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10457 pci_channel_state_t state)
10458{
10459 struct net_device *dev = pci_get_drvdata(pdev);
10460 struct bnx2x *bp = netdev_priv(dev);
10461
10462 rtnl_lock();
10463
10464 netif_device_detach(dev);
10465
10466 if (netif_running(dev))
f8ef6e44 10467 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
10468
10469 pci_disable_device(pdev);
10470
10471 rtnl_unlock();
10472
10473 /* Request a slot reset */
10474 return PCI_ERS_RESULT_NEED_RESET;
10475}
10476
10477/**
10478 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10479 * @pdev: Pointer to PCI device
10480 *
10481 * Restart the card from scratch, as if from a cold-boot.
10482 */
10483static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10484{
10485 struct net_device *dev = pci_get_drvdata(pdev);
10486 struct bnx2x *bp = netdev_priv(dev);
10487
10488 rtnl_lock();
10489
10490 if (pci_enable_device(pdev)) {
10491 dev_err(&pdev->dev,
10492 "Cannot re-enable PCI device after reset\n");
10493 rtnl_unlock();
10494 return PCI_ERS_RESULT_DISCONNECT;
10495 }
10496
10497 pci_set_master(pdev);
10498 pci_restore_state(pdev);
10499
10500 if (netif_running(dev))
10501 bnx2x_set_power_state(bp, PCI_D0);
10502
10503 rtnl_unlock();
10504
10505 return PCI_ERS_RESULT_RECOVERED;
10506}
10507
10508/**
10509 * bnx2x_io_resume - called when traffic can start flowing again
10510 * @pdev: Pointer to PCI device
10511 *
10512 * This callback is called when the error recovery driver tells us that
10513 * its OK to resume normal operation.
10514 */
10515static void bnx2x_io_resume(struct pci_dev *pdev)
10516{
10517 struct net_device *dev = pci_get_drvdata(pdev);
10518 struct bnx2x *bp = netdev_priv(dev);
10519
10520 rtnl_lock();
10521
f8ef6e44
YG
10522 bnx2x_eeh_recover(bp);
10523
493adb1f 10524 if (netif_running(dev))
f8ef6e44 10525 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
10526
10527 netif_device_attach(dev);
10528
10529 rtnl_unlock();
10530}
10531
10532static struct pci_error_handlers bnx2x_err_handler = {
10533 .error_detected = bnx2x_io_error_detected,
10534 .slot_reset = bnx2x_io_slot_reset,
10535 .resume = bnx2x_io_resume,
10536};
10537
a2fbb9ea 10538static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10539 .name = DRV_MODULE_NAME,
10540 .id_table = bnx2x_pci_tbl,
10541 .probe = bnx2x_init_one,
10542 .remove = __devexit_p(bnx2x_remove_one),
10543 .suspend = bnx2x_suspend,
10544 .resume = bnx2x_resume,
10545 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10546};
10547
10548static int __init bnx2x_init(void)
10549{
1cf167f2
EG
10550 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10551 if (bnx2x_wq == NULL) {
10552 printk(KERN_ERR PFX "Cannot create workqueue\n");
10553 return -ENOMEM;
10554 }
10555
a2fbb9ea
ET
10556 return pci_register_driver(&bnx2x_pci_driver);
10557}
10558
10559static void __exit bnx2x_cleanup(void)
10560{
10561 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
10562
10563 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
10564}
10565
10566module_init(bnx2x_init);
10567module_exit(bnx2x_cleanup);
10568