/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

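/* Usage sketch (added commentary, not from the source): these parameters
 * are given at module load time, e.g.
 *
 *	modprobe bnx2x int_mode=2 disable_tpa=1 debug=0x1
 *
 * int_mode forces INT#x or MSI instead of the default (MSI-X where
 * available), disable_tpa turns off HW LRO aggregation, and debug seeds
 * the netif debug msglevel.
 */
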
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
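
/* Note (added commentary): the two helpers above use the PCI config-space
 * GRC window - the target GRC address is written to PCICFG_GRC_ADDRESS,
 * the data is moved through PCICFG_GRC_DATA, and the window is then
 * parked back at PCICFG_VENDOR_ID_OFFSET so a stray config access cannot
 * hit an arbitrary register.
 */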

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
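
/* Note (added commentary): a DMAE command is posted by copying the
 * dmae_command structure dword-by-dword into the per-channel command
 * memory and then writing 1 to that channel's GO register
 * (dmae_reg_go_c[idx]), which kicks the HW engine.
 */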

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
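
/* Note (added commentary): completion is detected by polling the wb_comp
 * word in host memory, which the DMAE engine writes with DMAE_COMP_VAL
 * when the transfer finishes; the loop gives up after ~200 polls
 * (5us each, stretched to 100ms on emulation/FPGA).
 */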

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
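
/* Note (added commentary): each of the four STORM processors keeps an
 * assert list in its internal memory; the scan above reads 16-byte
 * entries until it hits COMMON_ASM_INVALID_ASSERT_OPCODE and returns
 * how many FW asserts were found.
 */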

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
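
/* Note (added commentary): the INTx path writes the HC config twice -
 * first with the MSI/MSI-X enable bit set, then again with it cleared so
 * only the INT line (and attention) stays enabled, presumably to flush
 * HC state across the mode switch.
 */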

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
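
/* Note (added commentary): the return value is a small bitmask - bit 0
 * means the CSTORM (Tx) status block index moved, bit 1 means the USTORM
 * (Rx) index moved - so a caller can tell which half has new work.
 */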

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);

}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
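
/* Note (added commentary): "used" is computed with wrap-safe signed
 * 16-bit arithmetic (SUB_S16), and the NUM_TX_RINGS "next-page"
 * descriptors are counted as permanently used so they are never handed
 * out as available BDs.
 */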

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
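
/* Note (added commentary): fp->sge_mask is a bitmap over the SGE ring -
 * a set bit marks an entry the FW may still complete.  Completed pages
 * are cleared as their CQEs arrive, and the producer is advanced only
 * across fully-cleared 64-bit mask words, refilling each word to
 * all-ones as it goes, so the FW view stays word-aligned.
 */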

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
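
/* Note (added commentary): the three Rx producers (BD, CQE, SGE) are
 * published to the USTORM internal memory as a single structure, dword
 * by dword; the wmb() before and the mmiowb() after bracket the update
 * so the FW never sees a producer ahead of its ring contents.
 */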

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
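
/* Note (added commentary): bnx2x_rx_int() is the NAPI Rx poll body - it
 * walks the completion queue up to "budget" packets, dispatching slowpath
 * CQEs to bnx2x_sp_event(), TPA start/stop CQEs to the aggregation pool,
 * and ordinary packets up the stack, then republishes all Rx producers.
 */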
1664
1665static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1666{
1667 struct bnx2x_fastpath *fp = fp_cookie;
1668 struct bnx2x *bp = fp->bp;
0626b899 1669 int index = fp->index;
a2fbb9ea 1670
da5a662a
VZ
1671 /* Return here if interrupt is disabled */
1672 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1674 return IRQ_HANDLED;
1675 }
1676
34f80b04 1677 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
0626b899
EG
1678 index, fp->sb_id);
1679 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1680
1681#ifdef BNX2X_STOP_ON_ERROR
1682 if (unlikely(bp->panic))
1683 return IRQ_HANDLED;
1684#endif
1685
1686 prefetch(fp->rx_cons_sb);
1687 prefetch(fp->tx_cons_sb);
1688 prefetch(&fp->status_blk->c_status_block.status_block_index);
1689 prefetch(&fp->status_blk->u_status_block.status_block_index);
1690
288379f0 1691 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1692
a2fbb9ea
ET
1693 return IRQ_HANDLED;
1694}
1695
1696static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697{
555f6c78 1698 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1699 u16 status = bnx2x_ack_int(bp);
34f80b04 1700 u16 mask;
a2fbb9ea 1701
34f80b04 1702 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1703 if (unlikely(status == 0)) {
1704 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1705 return IRQ_NONE;
1706 }
34f80b04 1707 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1708
34f80b04 1709 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712 return IRQ_HANDLED;
1713 }
1714
3196a88a
EG
1715#ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1717 return IRQ_HANDLED;
1718#endif
1719
34f80b04
EG
1720 mask = 0x2 << bp->fp[0].sb_id;
1721 if (status & mask) {
a2fbb9ea
ET
1722 struct bnx2x_fastpath *fp = &bp->fp[0];
1723
1724 prefetch(fp->rx_cons_sb);
1725 prefetch(fp->tx_cons_sb);
1726 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728
288379f0 1729 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1730
34f80b04 1731 status &= ~mask;
a2fbb9ea
ET
1732 }
1733
a2fbb9ea 1734
34f80b04 1735 if (unlikely(status & 0x1)) {
1cf167f2 1736 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1737
1738 status &= ~0x1;
1739 if (!status)
1740 return IRQ_HANDLED;
1741 }
1742
34f80b04
EG
1743 if (status)
1744 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1745 status);
a2fbb9ea 1746
c18487ee 1747 return IRQ_HANDLED;
a2fbb9ea
ET
1748}
1749
c18487ee 1750/* end of fast path */
a2fbb9ea 1751
bb2a0f7a 1752static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1753
c18487ee
YR
1754/* Link */
1755
1756/*
1757 * General service functions
1758 */
a2fbb9ea 1759
4a37fb66 1760static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1761{
1762 u32 lock_status;
1763 u32 resource_bit = (1 << resource);
1764 int func = BP_FUNC(bp);
1765 u32 hw_lock_control_reg;
c18487ee 1766 int cnt;
a2fbb9ea 1767
1768 /* Validating that the resource is within range */
1769 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1770 DP(NETIF_MSG_HW,
1771 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1773 return -EINVAL;
1774 }
a2fbb9ea 1775
1776 if (func <= 5) {
1777 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1778 } else {
1779 hw_lock_control_reg =
1780 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1781 }
1782
c18487ee 1783 /* Validating that the resource is not already taken */
4a37fb66 1784 lock_status = REG_RD(bp, hw_lock_control_reg);
1785 if (lock_status & resource_bit) {
1786 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1787 lock_status, resource_bit);
1788 return -EEXIST;
1789 }
a2fbb9ea 1790
 1791 /* Try for 5 seconds, polling every 5ms */
1792 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1793 /* Try to acquire the lock */
1794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795 lock_status = REG_RD(bp, hw_lock_control_reg);
1796 if (lock_status & resource_bit)
1797 return 0;
a2fbb9ea 1798
c18487ee 1799 msleep(5);
a2fbb9ea 1800 }
1801 DP(NETIF_MSG_HW, "Timeout\n");
1802 return -EAGAIN;
1803}
a2fbb9ea 1804
4a37fb66 1805static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1806{
1807 u32 lock_status;
1808 u32 resource_bit = (1 << resource);
1809 int func = BP_FUNC(bp);
1810 u32 hw_lock_control_reg;
a2fbb9ea 1811
1812 /* Validating that the resource is within range */
1813 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814 DP(NETIF_MSG_HW,
1815 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1817 return -EINVAL;
1818 }
1819
1820 if (func <= 5) {
1821 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822 } else {
1823 hw_lock_control_reg =
1824 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1825 }
1826
c18487ee 1827 /* Validating that the resource is currently taken */
4a37fb66 1828 lock_status = REG_RD(bp, hw_lock_control_reg);
1829 if (!(lock_status & resource_bit)) {
1830 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1831 lock_status, resource_bit);
1832 return -EFAULT;
1833 }
1834
4a37fb66 1835 REG_WR(bp, hw_lock_control_reg, resource_bit);
1836 return 0;
1837}
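/* Usage sketch for the acquire/release pair above (illustrative only;
 * example_locked_access is a hypothetical caller, not part of the
 * driver).  The acquire side polls up to 1000 times with msleep(5),
 * i.e. roughly 5 seconds, before giving up with -EAGAIN; writing
 * resource_bit to hw_lock_control_reg + 4 takes the lock, writing it
 * to hw_lock_control_reg releases it. */
#if 0
static int example_locked_access(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)		/* -EINVAL, -EEXIST or -EAGAIN */
		return rc;
	/* ... access the resource protected by the HW lock ... */
	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}
#endif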
1838
1839/* HW Lock for shared dual port PHYs */
4a37fb66 1840static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1841{
34f80b04 1842 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1843
1844 if (bp->port.need_hw_lock)
1845 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1846}
a2fbb9ea 1847
4a37fb66 1848static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1849{
1850 if (bp->port.need_hw_lock)
1851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1852
34f80b04 1853 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1854}
a2fbb9ea 1855
1856int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857{
1858 /* The GPIO should be swapped if swap register is set and active */
1859 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861 int gpio_shift = gpio_num +
1862 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863 u32 gpio_mask = (1 << gpio_shift);
1864 u32 gpio_reg;
1865 int value;
1866
1867 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1869 return -EINVAL;
1870 }
1871
1872 /* read GPIO value */
1873 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874
1875 /* get the requested pin value */
1876 if ((gpio_reg & gpio_mask) == gpio_mask)
1877 value = 1;
1878 else
1879 value = 0;
1880
1881 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1882
1883 return value;
1884}
1885
17de50b7 1886int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1887{
1888 /* The GPIO should be swapped if swap register is set and active */
1889 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1890 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891 int gpio_shift = gpio_num +
1892 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893 u32 gpio_mask = (1 << gpio_shift);
1894 u32 gpio_reg;
a2fbb9ea 1895
1896 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1898 return -EINVAL;
1899 }
a2fbb9ea 1900
4a37fb66 1901 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902 /* read GPIO and mask except the float bits */
1903 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1904
1905 switch (mode) {
1906 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908 gpio_num, gpio_shift);
1909 /* clear FLOAT and set CLR */
1910 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1912 break;
a2fbb9ea 1913
1914 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916 gpio_num, gpio_shift);
1917 /* clear FLOAT and set SET */
1918 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1920 break;
a2fbb9ea 1921
17de50b7 1922 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924 gpio_num, gpio_shift);
1925 /* set FLOAT */
1926 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927 break;
a2fbb9ea 1928
1929 default:
1930 break;
1931 }
1932
c18487ee 1933 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1935
c18487ee 1936 return 0;
1937}
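/* Worked example of the swap/shift logic above (illustrative): with the
 * port-swap strap active (NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE
 * both non-zero), a request for GPIO 2 on port 1 resolves to
 * gpio_port = 1 ^ 1 = 0, so gpio_shift = 2 and gpio_mask = 0x4;
 * without the swap, gpio_port = 1 and the pin shifts up by
 * MISC_REGISTERS_GPIO_PORT_SHIFT. */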
1938
1939int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1940{
1941 /* The GPIO should be swapped if swap register is set and active */
1942 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944 int gpio_shift = gpio_num +
1945 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946 u32 gpio_mask = (1 << gpio_shift);
1947 u32 gpio_reg;
1948
1949 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951 return -EINVAL;
1952 }
1953
1954 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1955 /* read GPIO int */
1956 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1957
1958 switch (mode) {
1959 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961 "output low\n", gpio_num, gpio_shift);
1962 /* clear SET and set CLR */
1963 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1965 break;
1966
1967 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969 "output high\n", gpio_num, gpio_shift);
1970 /* clear CLR and set SET */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1973 break;
1974
1975 default:
1976 break;
1977 }
1978
1979 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981
1982 return 0;
1983}
1984
c18487ee 1985static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1986{
1987 u32 spio_mask = (1 << spio_num);
1988 u32 spio_reg;
a2fbb9ea 1989
1990 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991 (spio_num > MISC_REGISTERS_SPIO_7)) {
1992 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1993 return -EINVAL;
1994 }
1995
4a37fb66 1996 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997 /* read SPIO and mask except the float bits */
1998 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1999
c18487ee 2000 switch (mode) {
6378c025 2001 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003 /* clear FLOAT and set CLR */
2004 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2006 break;
a2fbb9ea 2007
6378c025 2008 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010 /* clear FLOAT and set SET */
2011 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2013 break;
a2fbb9ea 2014
2015 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2017 /* set FLOAT */
2018 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019 break;
a2fbb9ea 2020
2021 default:
2022 break;
2023 }
2024
c18487ee 2025 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2026 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2027
2028 return 0;
2029}
2030
c18487ee 2031static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2032{
2033 switch (bp->link_vars.ieee_fc &
2034 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2035 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2036 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2037 ADVERTISED_Pause);
2038 break;
2039 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2040 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2041 ADVERTISED_Pause);
2042 break;
2043 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2044 bp->port.advertising |= ADVERTISED_Asym_Pause;
2045 break;
2046 default:
34f80b04 2047 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2048 ADVERTISED_Pause);
2049 break;
2050 }
2051}
f1410647 2052
2053static void bnx2x_link_report(struct bnx2x *bp)
2054{
2055 if (bp->link_vars.link_up) {
2056 if (bp->state == BNX2X_STATE_OPEN)
2057 netif_carrier_on(bp->dev);
2058 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2059
c18487ee 2060 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2061
2062 if (bp->link_vars.duplex == DUPLEX_FULL)
2063 printk("full duplex");
2064 else
2065 printk("half duplex");
f1410647 2066
2067 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2068 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2069 printk(", receive ");
c0700f90 2070 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2071 printk("& transmit ");
2072 } else {
2073 printk(", transmit ");
2074 }
2075 printk("flow control ON");
2076 }
2077 printk("\n");
f1410647 2078
2079 } else { /* link_down */
2080 netif_carrier_off(bp->dev);
2081 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2082 }
2083}
2084
b5bf9068 2085static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2086{
2087 if (!BP_NOMCP(bp)) {
2088 u8 rc;
a2fbb9ea 2089
19680c48 2090 /* Initialize link parameters structure variables */
2091 /* It is recommended to turn off RX FC for jumbo frames
2092 for better performance */
2093 if (IS_E1HMF(bp))
c0700f90 2094 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2095 else if (bp->dev->mtu > 5000)
c0700f90 2096 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2097 else
c0700f90 2098 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2099
4a37fb66 2100 bnx2x_acquire_phy_lock(bp);
2101
2102 if (load_mode == LOAD_DIAG)
2103 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2104
19680c48 2105 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2106
4a37fb66 2107 bnx2x_release_phy_lock(bp);
a2fbb9ea 2108
2109 bnx2x_calc_fc_adv(bp);
2110
2111 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2112 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2113 bnx2x_link_report(bp);
b5bf9068 2114 }
34f80b04 2115
2116 return rc;
2117 }
2118 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2119 return -EINVAL;
2120}
2121
c18487ee 2122static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2123{
19680c48 2124 if (!BP_NOMCP(bp)) {
4a37fb66 2125 bnx2x_acquire_phy_lock(bp);
19680c48 2126 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2127 bnx2x_release_phy_lock(bp);
a2fbb9ea 2128
2129 bnx2x_calc_fc_adv(bp);
2130 } else
2131 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 2132}
a2fbb9ea 2133
2134static void bnx2x__link_reset(struct bnx2x *bp)
2135{
19680c48 2136 if (!BP_NOMCP(bp)) {
4a37fb66 2137 bnx2x_acquire_phy_lock(bp);
589abe3a 2138 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2139 bnx2x_release_phy_lock(bp);
2140 } else
2141 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 2142}
a2fbb9ea 2143
2144static u8 bnx2x_link_test(struct bnx2x *bp)
2145{
2146 u8 rc;
a2fbb9ea 2147
4a37fb66 2148 bnx2x_acquire_phy_lock(bp);
c18487ee 2149 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2150 bnx2x_release_phy_lock(bp);
a2fbb9ea 2151
2152 return rc;
2153}
a2fbb9ea 2154
8a1c38d1 2155static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2156{
2157 u32 r_param = bp->link_vars.line_speed / 8;
2158 u32 fair_periodic_timeout_usec;
2159 u32 t_fair;
34f80b04 2160
2161 memset(&(bp->cmng.rs_vars), 0,
2162 sizeof(struct rate_shaping_vars_per_port));
2163 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2164
2165 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2166 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2167
2168 /* this is the threshold below which no timer arming will occur
2169 1.25 coefficient is for the threshold to be a little bigger
 2170 than the real time, to compensate for timer inaccuracy */
2171 bp->cmng.rs_vars.rs_threshold =
2172 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2173
2174 /* resolution of fairness timer */
2175 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2176 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2177 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2178
2179 /* this is the threshold below which we won't arm the timer anymore */
2180 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2181
 2182 /* we multiply by 1e3/8 to get bytes/msec.
 2183 We don't want the credits to exceed
 2184 t_fair*FAIR_MEM (the algorithm resolution) */
2185 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2186 /* since each tick is 4 usec */
2187 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2188}
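/* Worked example of the arithmetic above (illustrative), taking the
 * 100 usec period named in the comment and a 10G link: r_param =
 * 10000/8 = 1250 bytes/usec, so rs_threshold = (100 * 1250 * 5)/4 =
 * 156250 bytes, i.e. 1.25 times the traffic of one rate-shaping
 * period; t_fair comes out as 1000 usec at 10G and 10000 usec at 1G,
 * as noted at its assignment. */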
2189
8a1c38d1 2190static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2191{
2192 struct rate_shaping_vars_per_vn m_rs_vn;
2193 struct fairness_vars_per_vn m_fair_vn;
2194 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2195 u16 vn_min_rate, vn_max_rate;
2196 int i;
2197
2198 /* If function is hidden - set min and max to zeroes */
2199 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2200 vn_min_rate = 0;
2201 vn_max_rate = 0;
2202
2203 } else {
2204 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2205 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2206 /* If fairness is enabled (not all min rates are zero) and
34f80b04 2207 the current min rate is zero - set it to 1.
33471629 2208 This is a requirement of the algorithm. */
8a1c38d1 2209 if (bp->vn_weight_sum && (vn_min_rate == 0))
2210 vn_min_rate = DEF_MIN_RATE;
2211 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2212 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2213 }
2214
2215 DP(NETIF_MSG_IFUP,
2216 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2217 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2218
2219 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2220 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2221
2222 /* global vn counter - maximal Mbps for this vn */
2223 m_rs_vn.vn_counter.rate = vn_max_rate;
2224
2225 /* quota - number of bytes transmitted in this period */
2226 m_rs_vn.vn_counter.quota =
2227 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2228
8a1c38d1 2229 if (bp->vn_weight_sum) {
2230 /* credit for each period of the fairness algorithm:
 2231 number of bytes in T_FAIR (the VNs share the port rate).
2232 vn_weight_sum should not be larger than 10000, thus
2233 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2234 than zero */
34f80b04 2235 m_fair_vn.vn_credit_delta =
2236 max((u32)(vn_min_rate * (T_FAIR_COEF /
2237 (8 * bp->vn_weight_sum))),
2238 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2239 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2240 m_fair_vn.vn_credit_delta);
2241 }
2242
2243 /* Store it to internal memory */
2244 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2245 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2247 ((u32 *)(&m_rs_vn))[i]);
2248
2249 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2250 REG_WR(bp, BAR_XSTRORM_INTMEM +
2251 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2252 ((u32 *)(&m_fair_vn))[i]);
2253}
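/* Worked example of the config decoding above (illustrative): the
 * MIN/MAX bandwidth fields are in units of 100 Mbps, so raw min/max
 * fields of 10 and 40 decode to vn_min_rate = 1000 and vn_max_rate =
 * 4000 Mbps; assuming the 100 usec rate-shaping period from
 * bnx2x_init_port_minmax(), the quota is then (4000 * 100)/8 = 50000
 * bytes per period. */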
2254
8a1c38d1 2255
2256/* This function is called upon link interrupt */
2257static void bnx2x_link_attn(struct bnx2x *bp)
2258{
2259 /* Make sure that we are synced with the current statistics */
2260 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2261
c18487ee 2262 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2263
2264 if (bp->link_vars.link_up) {
2265
2266 /* dropless flow control */
2267 if (CHIP_IS_E1H(bp)) {
2268 int port = BP_PORT(bp);
2269 u32 pause_enabled = 0;
2270
2271 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2272 pause_enabled = 1;
2273
2274 REG_WR(bp, BAR_USTRORM_INTMEM +
2275 USTORM_PAUSE_ENABLED_OFFSET(port),
2276 pause_enabled);
2277 }
2278
2279 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2280 struct host_port_stats *pstats;
2281
2282 pstats = bnx2x_sp(bp, port_stats);
2283 /* reset old bmac stats */
2284 memset(&(pstats->mac_stx[0]), 0,
2285 sizeof(struct mac_stx));
2286 }
2287 if ((bp->state == BNX2X_STATE_OPEN) ||
2288 (bp->state == BNX2X_STATE_DISABLED))
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290 }
2291
2292 /* indicate link status */
2293 bnx2x_link_report(bp);
2294
2295 if (IS_E1HMF(bp)) {
8a1c38d1 2296 int port = BP_PORT(bp);
34f80b04 2297 int func;
8a1c38d1 2298 int vn;
2299
2300 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2301 if (vn == BP_E1HVN(bp))
2302 continue;
2303
8a1c38d1 2304 func = ((vn << 1) | port);
2305
2306 /* Set the attention towards other drivers
2307 on the same port */
2308 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2309 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2310 }
34f80b04 2311
2312 if (bp->link_vars.link_up) {
2313 int i;
2314
2315 /* Init rate shaping and fairness contexts */
2316 bnx2x_init_port_minmax(bp);
34f80b04 2317
34f80b04 2318 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2319 bnx2x_init_vn_minmax(bp, 2*vn + port);
2320
2321 /* Store it to internal memory */
2322 for (i = 0;
2323 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2324 REG_WR(bp, BAR_XSTRORM_INTMEM +
2325 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2326 ((u32 *)(&bp->cmng))[i]);
2327 }
34f80b04 2328 }
c18487ee 2329}
a2fbb9ea 2330
2331static void bnx2x__link_status_update(struct bnx2x *bp)
2332{
2333 if (bp->state != BNX2X_STATE_OPEN)
2334 return;
a2fbb9ea 2335
c18487ee 2336 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2337
2338 if (bp->link_vars.link_up)
2339 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2340 else
2341 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2342
2343 /* indicate link status */
2344 bnx2x_link_report(bp);
a2fbb9ea 2345}
a2fbb9ea 2346
2347static void bnx2x_pmf_update(struct bnx2x *bp)
2348{
2349 int port = BP_PORT(bp);
2350 u32 val;
2351
2352 bp->port.pmf = 1;
2353 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2354
2355 /* enable nig attention */
2356 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2357 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2358 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2359
2360 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2361}
2362
c18487ee 2363/* end of Link */
2364
2365/* slow path */
2366
2367/*
2368 * General service functions
2369 */
2370
2371/* the slow path queue is odd since completions arrive on the fastpath ring */
2372static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2373 u32 data_hi, u32 data_lo, int common)
2374{
34f80b04 2375 int func = BP_FUNC(bp);
a2fbb9ea 2376
2377 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2378 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2379 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2380 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2381 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2382
2383#ifdef BNX2X_STOP_ON_ERROR
2384 if (unlikely(bp->panic))
2385 return -EIO;
2386#endif
2387
34f80b04 2388 spin_lock_bh(&bp->spq_lock);
2389
2390 if (!bp->spq_left) {
2391 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2392 spin_unlock_bh(&bp->spq_lock);
2393 bnx2x_panic();
2394 return -EBUSY;
2395 }
f1410647 2396
 2397 /* CID needs port number to be encoded in it */
2398 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2399 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2400 HW_CID(bp, cid)));
2401 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2402 if (common)
2403 bp->spq_prod_bd->hdr.type |=
2404 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2405
2406 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2407 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2408
2409 bp->spq_left--;
2410
2411 if (bp->spq_prod_bd == bp->spq_last_bd) {
2412 bp->spq_prod_bd = bp->spq;
2413 bp->spq_prod_idx = 0;
2414 DP(NETIF_MSG_TIMER, "end of spq\n");
2415
2416 } else {
2417 bp->spq_prod_bd++;
2418 bp->spq_prod_idx++;
2419 }
2420
34f80b04 2421 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2422 bp->spq_prod_idx);
2423
34f80b04 2424 spin_unlock_bh(&bp->spq_lock);
2425 return 0;
2426}
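/* Usage sketch for the slow-path post above (illustrative), modelled
 * on the statistics-query post issued later in this file: the ramrod
 * payload travels in data_hi/data_lo, and the completion arrives on
 * the fastpath ring.  Posting wraps spq_prod_bd back to bp->spq once
 * the last BD is consumed. */
#if 0
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			   ((u32 *)&ramrod_data)[1],
			   ((u32 *)&ramrod_data)[0], 0);
#endif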
2427
2428/* acquire split MCP access lock register */
4a37fb66 2429static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2430{
a2fbb9ea 2431 u32 i, j, val;
34f80b04 2432 int rc = 0;
2433
2434 might_sleep();
2435 i = 100;
2436 for (j = 0; j < i*10; j++) {
2437 val = (1UL << 31);
2438 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2439 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2440 if (val & (1L << 31))
2441 break;
2442
2443 msleep(5);
2444 }
a2fbb9ea 2445 if (!(val & (1L << 31))) {
19680c48 2446 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2447 rc = -EBUSY;
2448 }
2449
2450 return rc;
2451}
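/* Note on the loop above: it polls bit 31 of GRCBASE_MCP + 0x9c up to
 * i*10 = 1000 times with msleep(5), i.e. about 5 seconds, before
 * failing with -EBUSY; bnx2x_release_alr() below drops the lock by
 * writing 0 to the same register. */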
2452
2453/* release split MCP access lock register */
2454static void bnx2x_release_alr(struct bnx2x *bp)
2455{
2456 u32 val = 0;
2457
2458 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2459}
2460
2461static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2462{
2463 struct host_def_status_block *def_sb = bp->def_status_blk;
2464 u16 rc = 0;
2465
2466 barrier(); /* status block is written to by the chip */
2467 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2468 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2469 rc |= 1;
2470 }
2471 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2472 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2473 rc |= 2;
2474 }
2475 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2476 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2477 rc |= 4;
2478 }
2479 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2480 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2481 rc |= 8;
2482 }
2483 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2484 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2485 rc |= 16;
2486 }
2487 return rc;
2488}
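/* The return value above is a bitmask of which default status block
 * indices changed: bit 0 - attention, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task() below tests only
 * bit 0 (HW attentions) and re-acks all five indices. */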
2489
2490/*
2491 * slow path service functions
2492 */
2493
2494static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2495{
34f80b04 2496 int port = BP_PORT(bp);
2497 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2498 COMMAND_REG_ATTN_BITS_SET);
2499 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2500 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2501 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2502 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2503 u32 aeu_mask;
87942b46 2504 u32 nig_mask = 0;
a2fbb9ea 2505
2506 if (bp->attn_state & asserted)
2507 BNX2X_ERR("IGU ERROR\n");
2508
2509 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2510 aeu_mask = REG_RD(bp, aeu_addr);
2511
a2fbb9ea 2512 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2513 aeu_mask, asserted);
2514 aeu_mask &= ~(asserted & 0xff);
2515 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2516
2517 REG_WR(bp, aeu_addr, aeu_mask);
2518 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2519
3fcaf2e5 2520 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2521 bp->attn_state |= asserted;
3fcaf2e5 2522 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2523
2524 if (asserted & ATTN_HARD_WIRED_MASK) {
2525 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2526
2527 bnx2x_acquire_phy_lock(bp);
2528
877e9aa4 2529 /* save nig interrupt mask */
87942b46 2530 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2531 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2532
c18487ee 2533 bnx2x_link_attn(bp);
2534
2535 /* handle unicore attn? */
2536 }
2537 if (asserted & ATTN_SW_TIMER_4_FUNC)
2538 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2539
2540 if (asserted & GPIO_2_FUNC)
2541 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2542
2543 if (asserted & GPIO_3_FUNC)
2544 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2545
2546 if (asserted & GPIO_4_FUNC)
2547 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2548
2549 if (port == 0) {
2550 if (asserted & ATTN_GENERAL_ATTN_1) {
2551 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2552 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2553 }
2554 if (asserted & ATTN_GENERAL_ATTN_2) {
2555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2557 }
2558 if (asserted & ATTN_GENERAL_ATTN_3) {
2559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2561 }
2562 } else {
2563 if (asserted & ATTN_GENERAL_ATTN_4) {
2564 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2565 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2566 }
2567 if (asserted & ATTN_GENERAL_ATTN_5) {
2568 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2569 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2570 }
2571 if (asserted & ATTN_GENERAL_ATTN_6) {
2572 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2574 }
2575 }
2576
2577 } /* if hardwired */
2578
2579 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2580 asserted, hc_addr);
2581 REG_WR(bp, hc_addr, asserted);
2582
2583 /* now set back the mask */
a5e9a7cf 2584 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2585 REG_WR(bp, nig_int_mask_addr, nig_mask);
2586 bnx2x_release_phy_lock(bp);
2587 }
2588}
2589
877e9aa4 2590static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2591{
34f80b04 2592 int port = BP_PORT(bp);
2593 int reg_offset;
2594 u32 val;
2595
2596 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2597 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2598
34f80b04 2599 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2600
2601 val = REG_RD(bp, reg_offset);
2602 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2603 REG_WR(bp, reg_offset, val);
2604
2605 BNX2X_ERR("SPIO5 hw attention\n");
2606
2607 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2608 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2609 /* Fan failure attention */
2610
17de50b7 2611 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2612 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2613 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2614 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2615 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2616 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2617 /* mark the failure */
c18487ee 2618 bp->link_params.ext_phy_config &=
877e9aa4 2619 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2620 bp->link_params.ext_phy_config |=
2621 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2622 SHMEM_WR(bp,
2623 dev_info.port_hw_config[port].
2624 external_phy_config,
c18487ee 2625 bp->link_params.ext_phy_config);
2626 /* log the failure */
2627 printk(KERN_ERR PFX "Fan Failure on Network"
2628 " Controller %s has caused the driver to"
2629 " shutdown the card to prevent permanent"
2630 " damage. Please contact Dell Support for"
2631 " assistance\n", bp->dev->name);
2632 break;
2633
2634 default:
2635 break;
2636 }
2637 }
34f80b04 2638
2639 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2640 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2641 bnx2x_acquire_phy_lock(bp);
2642 bnx2x_handle_module_detect_int(&bp->link_params);
2643 bnx2x_release_phy_lock(bp);
2644 }
2645
2646 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2647
2648 val = REG_RD(bp, reg_offset);
2649 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2650 REG_WR(bp, reg_offset, val);
2651
2652 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2653 (attn & HW_INTERRUT_ASSERT_SET_0));
2654 bnx2x_panic();
2655 }
2656}
2657
2658static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2659{
2660 u32 val;
2661
0626b899 2662 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2663
2664 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2665 BNX2X_ERR("DB hw attention 0x%x\n", val);
2666 /* DORQ discard attention */
2667 if (val & 0x2)
2668 BNX2X_ERR("FATAL error from DORQ\n");
2669 }
2670
2671 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2672
2673 int port = BP_PORT(bp);
2674 int reg_offset;
2675
2676 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2677 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2678
2679 val = REG_RD(bp, reg_offset);
2680 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2681 REG_WR(bp, reg_offset, val);
2682
2683 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2684 (attn & HW_INTERRUT_ASSERT_SET_1));
2685 bnx2x_panic();
2686 }
2687}
2688
2689static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2690{
2691 u32 val;
2692
2693 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2694
2695 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2696 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2697 /* CFC error attention */
2698 if (val & 0x2)
2699 BNX2X_ERR("FATAL error from CFC\n");
2700 }
2701
2702 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2703
2704 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2705 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2706 /* RQ_USDMDP_FIFO_OVERFLOW */
2707 if (val & 0x18000)
2708 BNX2X_ERR("FATAL error from PXP\n");
2709 }
2710
2711 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2712
2713 int port = BP_PORT(bp);
2714 int reg_offset;
2715
2716 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2717 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2718
2719 val = REG_RD(bp, reg_offset);
2720 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2721 REG_WR(bp, reg_offset, val);
2722
2723 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2724 (attn & HW_INTERRUT_ASSERT_SET_2));
2725 bnx2x_panic();
2726 }
2727}
2728
2729static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2730{
2731 u32 val;
2732
2733 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2734
2735 if (attn & BNX2X_PMF_LINK_ASSERT) {
2736 int func = BP_FUNC(bp);
2737
2738 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2739 bnx2x__link_status_update(bp);
2740 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2741 DRV_STATUS_PMF)
2742 bnx2x_pmf_update(bp);
2743
2744 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2745
2746 BNX2X_ERR("MC assert!\n");
2747 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2749 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2751 bnx2x_panic();
2752
2753 } else if (attn & BNX2X_MCP_ASSERT) {
2754
2755 BNX2X_ERR("MCP assert!\n");
2756 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2757 bnx2x_fw_dump(bp);
2758
2759 } else
2760 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2761 }
2762
2763 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2764 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2765 if (attn & BNX2X_GRC_TIMEOUT) {
2766 val = CHIP_IS_E1H(bp) ?
2767 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2768 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2769 }
2770 if (attn & BNX2X_GRC_RSV) {
2771 val = CHIP_IS_E1H(bp) ?
2772 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2773 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2774 }
877e9aa4 2775 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2776 }
2777}
2778
2779static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2780{
2781 struct attn_route attn;
2782 struct attn_route group_mask;
34f80b04 2783 int port = BP_PORT(bp);
877e9aa4 2784 int index;
2785 u32 reg_addr;
2786 u32 val;
3fcaf2e5 2787 u32 aeu_mask;
2788
2789 /* need to take HW lock because MCP or other port might also
2790 try to handle this event */
4a37fb66 2791 bnx2x_acquire_alr(bp);
2792
2793 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2794 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2795 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2796 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2797 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2798 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2799
2800 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2801 if (deasserted & (1 << index)) {
2802 group_mask = bp->attn_group[index];
2803
2804 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2805 index, group_mask.sig[0], group_mask.sig[1],
2806 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2807
2808 bnx2x_attn_int_deasserted3(bp,
2809 attn.sig[3] & group_mask.sig[3]);
2810 bnx2x_attn_int_deasserted1(bp,
2811 attn.sig[1] & group_mask.sig[1]);
2812 bnx2x_attn_int_deasserted2(bp,
2813 attn.sig[2] & group_mask.sig[2]);
2814 bnx2x_attn_int_deasserted0(bp,
2815 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2816
2817 if ((attn.sig[0] & group_mask.sig[0] &
2818 HW_PRTY_ASSERT_SET_0) ||
2819 (attn.sig[1] & group_mask.sig[1] &
2820 HW_PRTY_ASSERT_SET_1) ||
2821 (attn.sig[2] & group_mask.sig[2] &
2822 HW_PRTY_ASSERT_SET_2))
6378c025 2823 BNX2X_ERR("FATAL HW block parity attention\n");
2824 }
2825 }
2826
4a37fb66 2827 bnx2x_release_alr(bp);
a2fbb9ea 2828
5c862848 2829 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2830
2831 val = ~deasserted;
2832 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833 val, reg_addr);
5c862848 2834 REG_WR(bp, reg_addr, val);
a2fbb9ea 2835
a2fbb9ea 2836 if (~bp->attn_state & deasserted)
3fcaf2e5 2837 BNX2X_ERR("IGU ERROR\n");
2838
2839 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2840 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2841
2842 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2843 aeu_mask = REG_RD(bp, reg_addr);
2844
2845 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2846 aeu_mask, deasserted);
2847 aeu_mask |= (deasserted & 0xff);
2848 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2849
2850 REG_WR(bp, reg_addr, aeu_mask);
2851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2852
2853 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2854 bp->attn_state &= ~deasserted;
2855 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2856}
2857
2858static void bnx2x_attn_int(struct bnx2x *bp)
2859{
2860 /* read local copy of bits */
2861 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2862 attn_bits);
2863 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2864 attn_bits_ack);
2865 u32 attn_state = bp->attn_state;
2866
2867 /* look for changed bits */
2868 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2869 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2870
2871 DP(NETIF_MSG_HW,
2872 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2873 attn_bits, attn_ack, asserted, deasserted);
2874
2875 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2876 BNX2X_ERR("BAD attention state\n");
2877
2878 /* handle bits that were raised */
2879 if (asserted)
2880 bnx2x_attn_int_asserted(bp, asserted);
2881
2882 if (deasserted)
2883 bnx2x_attn_int_deasserted(bp, deasserted);
2884}
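/* Worked example of the edge detection above (illustrative): with
 * attn_bits = 0x3, attn_ack = 0x2 and attn_state = 0x2, asserted =
 * 0x3 & ~0x2 & ~0x2 = 0x1 (bit 0 newly raised) and deasserted = 0.
 * Once bit 0 is acked and later drops (attn_bits = 0x2, attn_ack =
 * 0x3, attn_state = 0x3), deasserted = ~0x2 & 0x3 & 0x3 = 0x1 and the
 * handler clears it from attn_state. */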
2885
2886static void bnx2x_sp_task(struct work_struct *work)
2887{
1cf167f2 2888 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2889 u16 status;
2890
34f80b04 2891
2892 /* Return here if interrupt is disabled */
2893 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2894 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2895 return;
2896 }
2897
2898 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2899/* if (status == 0) */
2900/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2901
3196a88a 2902 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2903
877e9aa4
ET
2904 /* HW attentions */
2905 if (status & 0x1)
a2fbb9ea 2906 bnx2x_attn_int(bp);
a2fbb9ea 2907
68d59484 2908 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2909 IGU_INT_NOP, 1);
2910 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2911 IGU_INT_NOP, 1);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2913 IGU_INT_NOP, 1);
2914 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2915 IGU_INT_NOP, 1);
2916 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2917 IGU_INT_ENABLE, 1);
877e9aa4 2918
2919}
2920
2921static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2922{
2923 struct net_device *dev = dev_instance;
2924 struct bnx2x *bp = netdev_priv(dev);
2925
2926 /* Return here if interrupt is disabled */
2927 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2928 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2929 return IRQ_HANDLED;
2930 }
2931
8d9c5f34 2932 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2933
2934#ifdef BNX2X_STOP_ON_ERROR
2935 if (unlikely(bp->panic))
2936 return IRQ_HANDLED;
2937#endif
2938
1cf167f2 2939 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2940
2941 return IRQ_HANDLED;
2942}
2943
2944/* end of slow path */
2945
2946/* Statistics */
2947
2948/****************************************************************************
2949* Macros
2950****************************************************************************/
2951
2952/* sum[hi:lo] += add[hi:lo] */
2953#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2954 do { \
2955 s_lo += a_lo; \
f5ba6772 2956 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2957 } while (0)
2958
2959/* difference = minuend - subtrahend */
2960#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2961 do { \
2962 if (m_lo < s_lo) { \
2963 /* underflow */ \
a2fbb9ea 2964 d_hi = m_hi - s_hi; \
bb2a0f7a 2965 if (d_hi > 0) { \
6378c025 2966 /* we can 'borrow' 1 */ \
2967 d_hi--; \
2968 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2969 } else { \
6378c025 2970 /* m_hi <= s_hi */ \
2971 d_hi = 0; \
2972 d_lo = 0; \
2973 } \
2974 } else { \
2975 /* m_lo >= s_lo */ \
a2fbb9ea 2976 if (m_hi < s_hi) { \
2977 d_hi = 0; \
2978 d_lo = 0; \
2979 } else { \
6378c025 2980 /* m_hi >= s_hi */ \
2981 d_hi = m_hi - s_hi; \
2982 d_lo = m_lo - s_lo; \
2983 } \
2984 } \
2985 } while (0)
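/* Worked examples for the split 64-bit helpers above (illustrative):
 * ADD_64 on s = 0x0:0xffffffff, a = 0x0:0x1 wraps s_lo to 0 and the
 * (s_lo < a_lo) test supplies the carry, giving 0x1:0x0.  DIFF_64 on
 * 0x1:0x0 minus 0x0:0x1 takes the m_lo < s_lo branch, borrows one
 * from d_hi and yields 0x0:0xffffffff; a negative difference clamps
 * to 0:0 instead of wrapping. */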
2986
bb2a0f7a 2987#define UPDATE_STAT64(s, t) \
a2fbb9ea 2988 do { \
2989 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2990 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2991 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2992 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2993 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2994 pstats->mac_stx[1].t##_lo, diff.lo); \
2995 } while (0)
2996
bb2a0f7a 2997#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2998 do { \
2999 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3000 diff.lo, new->s##_lo, old->s##_lo); \
3001 ADD_64(estats->t##_hi, diff.hi, \
3002 estats->t##_lo, diff.lo); \
3003 } while (0)
3004
3005/* sum[hi:lo] += add */
3006#define ADD_EXTEND_64(s_hi, s_lo, a) \
3007 do { \
3008 s_lo += a; \
3009 s_hi += (s_lo < a) ? 1 : 0; \
3010 } while (0)
3011
bb2a0f7a 3012#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3013 do { \
3014 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3015 pstats->mac_stx[1].s##_lo, \
3016 new->s); \
3017 } while (0)
3018
bb2a0f7a 3019#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3020 do { \
3021 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3022 old_tclient->s = tclient->s; \
3023 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3024 } while (0)
3025
3026#define UPDATE_EXTEND_USTAT(s, t) \
3027 do { \
3028 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3029 old_uclient->s = uclient->s; \
3030 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3031 } while (0)
3032
3033#define UPDATE_EXTEND_XSTAT(s, t) \
3034 do { \
3035 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3036 old_xclient->s = xclient->s; \
3037 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3038 } while (0)
3039
3040/* minuend -= subtrahend */
3041#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3042 do { \
3043 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3044 } while (0)
3045
3046/* minuend[hi:lo] -= subtrahend */
3047#define SUB_EXTEND_64(m_hi, m_lo, s) \
3048 do { \
3049 SUB_64(m_hi, 0, m_lo, s); \
3050 } while (0)
3051
3052#define SUB_EXTEND_USTAT(s, t) \
3053 do { \
3054 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3055 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3056 } while (0)
3057
3058/*
3059 * General service functions
3060 */
3061
3062static inline long bnx2x_hilo(u32 *hiref)
3063{
3064 u32 lo = *(hiref + 1);
3065#if (BITS_PER_LONG == 64)
3066 u32 hi = *hiref;
3067
3068 return HILO_U64(hi, lo);
3069#else
3070 return lo;
3071#endif
3072}
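/* Note: on 64-bit builds bnx2x_hilo() returns the full
 * HILO_U64(hi, lo) value; on 32-bit builds only the low 32 bits reach
 * the caller, so counters beyond 32 bits wrap in the reported
 * statistics. */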
3073
3074/*
3075 * Init service functions
3076 */
3077
3078static void bnx2x_storm_stats_post(struct bnx2x *bp)
3079{
3080 if (!bp->stats_pending) {
3081 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3082 int i, rc;
3083
3084 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3085 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3086 for_each_queue(bp, i)
3087 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3088
3089 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3090 ((u32 *)&ramrod_data)[1],
3091 ((u32 *)&ramrod_data)[0], 0);
3092 if (rc == 0) {
 3093 /* stats ramrod has its own slot on the spq */
3094 bp->spq_left++;
3095 bp->stats_pending = 1;
3096 }
3097 }
3098}
3099
3100static void bnx2x_stats_init(struct bnx2x *bp)
3101{
3102 int port = BP_PORT(bp);
de832a55 3103 int i;
bb2a0f7a 3104
de832a55 3105 bp->stats_pending = 0;
3106 bp->executer_idx = 0;
3107 bp->stats_counter = 0;
3108
3109 /* port stats */
3110 if (!BP_NOMCP(bp))
3111 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3112 else
3113 bp->port.port_stx = 0;
3114 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3115
3116 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3117 bp->port.old_nig_stats.brb_discard =
3118 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3119 bp->port.old_nig_stats.brb_truncate =
3120 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3121 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3122 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3123 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3124 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3125
3126 /* function stats */
3127 for_each_queue(bp, i) {
3128 struct bnx2x_fastpath *fp = &bp->fp[i];
3129
3130 memset(&fp->old_tclient, 0,
3131 sizeof(struct tstorm_per_client_stats));
3132 memset(&fp->old_uclient, 0,
3133 sizeof(struct ustorm_per_client_stats));
3134 memset(&fp->old_xclient, 0,
3135 sizeof(struct xstorm_per_client_stats));
3136 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3137 }
3138
bb2a0f7a 3139 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3140 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3141
3142 bp->stats_state = STATS_STATE_DISABLED;
3143 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3144 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3145}
3146
3147static void bnx2x_hw_stats_post(struct bnx2x *bp)
3148{
3149 struct dmae_command *dmae = &bp->stats_dmae;
3150 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3151
3152 *stats_comp = DMAE_COMP_VAL;
3153 if (CHIP_REV_IS_SLOW(bp))
3154 return;
3155
3156 /* loader */
3157 if (bp->executer_idx) {
3158 int loader_idx = PMF_DMAE_C(bp);
3159
3160 memset(dmae, 0, sizeof(struct dmae_command));
3161
3162 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3163 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3164 DMAE_CMD_DST_RESET |
3165#ifdef __BIG_ENDIAN
3166 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3167#else
3168 DMAE_CMD_ENDIANITY_DW_SWAP |
3169#endif
3170 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3171 DMAE_CMD_PORT_0) |
3172 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3173 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3174 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3175 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3176 sizeof(struct dmae_command) *
3177 (loader_idx + 1)) >> 2;
3178 dmae->dst_addr_hi = 0;
3179 dmae->len = sizeof(struct dmae_command) >> 2;
3180 if (CHIP_IS_E1(bp))
3181 dmae->len--;
3182 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3183 dmae->comp_addr_hi = 0;
3184 dmae->comp_val = 1;
3185
3186 *stats_comp = 0;
3187 bnx2x_post_dmae(bp, dmae, loader_idx);
3188
3189 } else if (bp->func_stx) {
3190 *stats_comp = 0;
3191 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3192 }
3193}
3194
3195static int bnx2x_stats_comp(struct bnx2x *bp)
3196{
3197 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3198 int cnt = 10;
3199
3200 might_sleep();
3201 while (*stats_comp != DMAE_COMP_VAL) {
3202 if (!cnt) {
3203 BNX2X_ERR("timeout waiting for stats finished\n");
3204 break;
3205 }
3206 cnt--;
12469401 3207 msleep(1);
3208 }
3209 return 1;
3210}
3211
3212/*
3213 * Statistics service functions
3214 */
3215
3216static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3217{
3218 struct dmae_command *dmae;
3219 u32 opcode;
3220 int loader_idx = PMF_DMAE_C(bp);
3221 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3222
3223 /* sanity */
3224 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3225 BNX2X_ERR("BUG!\n");
3226 return;
3227 }
3228
3229 bp->executer_idx = 0;
3230
3231 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232 DMAE_CMD_C_ENABLE |
3233 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234#ifdef __BIG_ENDIAN
3235 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236#else
3237 DMAE_CMD_ENDIANITY_DW_SWAP |
3238#endif
3239 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3241
3242 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3243 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3244 dmae->src_addr_lo = bp->port.port_stx >> 2;
3245 dmae->src_addr_hi = 0;
3246 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3247 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3248 dmae->len = DMAE_LEN32_RD_MAX;
3249 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3250 dmae->comp_addr_hi = 0;
3251 dmae->comp_val = 1;
3252
3253 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3255 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3256 dmae->src_addr_hi = 0;
3257 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3258 DMAE_LEN32_RD_MAX * 4);
3259 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3260 DMAE_LEN32_RD_MAX * 4);
3261 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3262 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3263 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3264 dmae->comp_val = DMAE_COMP_VAL;
3265
3266 *stats_comp = 0;
3267 bnx2x_hw_stats_post(bp);
3268 bnx2x_stats_comp(bp);
3269}
3270
3271static void bnx2x_port_stats_init(struct bnx2x *bp)
3272{
3273 struct dmae_command *dmae;
34f80b04 3274 int port = BP_PORT(bp);
bb2a0f7a 3275 int vn = BP_E1HVN(bp);
a2fbb9ea 3276 u32 opcode;
bb2a0f7a 3277 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3278 u32 mac_addr;
3279 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3280
3281 /* sanity */
3282 if (!bp->link_vars.link_up || !bp->port.pmf) {
3283 BNX2X_ERR("BUG!\n");
3284 return;
3285 }
3286
3287 bp->executer_idx = 0;
3288
3289 /* MCP */
3290 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3291 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3292 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3293#ifdef __BIG_ENDIAN
bb2a0f7a 3294 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3295#else
bb2a0f7a 3296 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3297#endif
3298 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3299 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3300
bb2a0f7a 3301 if (bp->port.port_stx) {
3302
3303 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3304 dmae->opcode = opcode;
3305 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3306 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3307 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3308 dmae->dst_addr_hi = 0;
3309 dmae->len = sizeof(struct host_port_stats) >> 2;
3310 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3311 dmae->comp_addr_hi = 0;
3312 dmae->comp_val = 1;
3313 }
3314
3315 if (bp->func_stx) {
3316
3317 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3318 dmae->opcode = opcode;
3319 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3320 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3321 dmae->dst_addr_lo = bp->func_stx >> 2;
3322 dmae->dst_addr_hi = 0;
3323 dmae->len = sizeof(struct host_func_stats) >> 2;
3324 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3325 dmae->comp_addr_hi = 0;
3326 dmae->comp_val = 1;
3327 }
3328
bb2a0f7a 3329 /* MAC */
3330 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3331 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3332 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3333#ifdef __BIG_ENDIAN
3334 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3335#else
3336 DMAE_CMD_ENDIANITY_DW_SWAP |
3337#endif
3338 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3339 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3340
c18487ee 3341 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3342
3343 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3344 NIG_REG_INGRESS_BMAC0_MEM);
3345
3346 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3347 BIGMAC_REGISTER_TX_STAT_GTBYT */
3348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349 dmae->opcode = opcode;
3350 dmae->src_addr_lo = (mac_addr +
3351 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3352 dmae->src_addr_hi = 0;
3353 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3354 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3355 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3356 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3357 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3358 dmae->comp_addr_hi = 0;
3359 dmae->comp_val = 1;
3360
3361 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3362 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3363 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3364 dmae->opcode = opcode;
3365 dmae->src_addr_lo = (mac_addr +
3366 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3367 dmae->src_addr_hi = 0;
3368 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3369 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3370 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3371 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3372 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3373 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3374 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3375 dmae->comp_addr_hi = 0;
3376 dmae->comp_val = 1;
3377
c18487ee 3378 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3379
3380 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3381
3382 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3383 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384 dmae->opcode = opcode;
3385 dmae->src_addr_lo = (mac_addr +
3386 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3387 dmae->src_addr_hi = 0;
3388 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3390 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3393 dmae->comp_val = 1;
3394
3395 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3396 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3397 dmae->opcode = opcode;
3398 dmae->src_addr_lo = (mac_addr +
3399 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3400 dmae->src_addr_hi = 0;
3401 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3402 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3403 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3404 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3405 dmae->len = 1;
3406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3407 dmae->comp_addr_hi = 0;
3408 dmae->comp_val = 1;
3409
3410 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3411 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3412 dmae->opcode = opcode;
3413 dmae->src_addr_lo = (mac_addr +
3414 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3415 dmae->src_addr_hi = 0;
3416 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3417 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3418 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3419 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3420 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3421 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3422 dmae->comp_addr_hi = 0;
3423 dmae->comp_val = 1;
3424 }
3425
3426 /* NIG */
3427 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3428 dmae->opcode = opcode;
3429 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3430 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3431 dmae->src_addr_hi = 0;
3432 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3433 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3434 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3435 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3436 dmae->comp_addr_hi = 0;
3437 dmae->comp_val = 1;
3438
3439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440 dmae->opcode = opcode;
3441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3442 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3445 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3447 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3448 dmae->len = (2*sizeof(u32)) >> 2;
3449 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3450 dmae->comp_addr_hi = 0;
3451 dmae->comp_val = 1;
3452
a2fbb9ea
ET
3453 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3455 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3456 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3457#ifdef __BIG_ENDIAN
3458 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3459#else
3460 DMAE_CMD_ENDIANITY_DW_SWAP |
3461#endif
bb2a0f7a
YG
3462 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3463 (vn << DMAE_CMD_E1HVN_SHIFT));
3464 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3465 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3466 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3467 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3468 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3469 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3470 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3471 dmae->len = (2*sizeof(u32)) >> 2;
3472 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3473 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3474 dmae->comp_val = DMAE_COMP_VAL;
3475
3476 *stats_comp = 0;
a2fbb9ea
ET
3477}
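
Editor's note: the DMAE descriptors built above split every 64-bit host address into two 32-bit halves (dst_addr_lo/dst_addr_hi) via U64_LO()/U64_HI(). A minimal standalone sketch of that split, assuming nothing beyond the standard headers; split_lo/split_hi are illustrative names, not the driver's macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helpers mirroring what U64_LO()/U64_HI() do:
 * take the low and high 32 bits of a 64-bit DMA address. */
static uint32_t split_lo(uint64_t addr) { return (uint32_t)(addr & 0xffffffffu); }
static uint32_t split_hi(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
	uint64_t mapping = 0x0000000123456789ull;	/* pretend DMA address */

	printf("lo=0x%08x hi=0x%08x\n", split_lo(mapping), split_hi(mapping));
	return 0;
}
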
3478
bb2a0f7a 3479static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3480{
bb2a0f7a
YG
3481 struct dmae_command *dmae = &bp->stats_dmae;
3482 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3483
bb2a0f7a
YG
3484 /* sanity */
3485 if (!bp->func_stx) {
3486 BNX2X_ERR("BUG!\n");
3487 return;
3488 }
a2fbb9ea 3489
bb2a0f7a
YG
3490 bp->executer_idx = 0;
3491 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3492
bb2a0f7a
YG
3493 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3494 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3495 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3496#ifdef __BIG_ENDIAN
3497 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3498#else
3499 DMAE_CMD_ENDIANITY_DW_SWAP |
3500#endif
3501 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3502 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3503 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3504 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3505 dmae->dst_addr_lo = bp->func_stx >> 2;
3506 dmae->dst_addr_hi = 0;
3507 dmae->len = sizeof(struct host_func_stats) >> 2;
3508 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3509 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3510 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3511
bb2a0f7a
YG
3512 *stats_comp = 0;
3513}
a2fbb9ea 3514
bb2a0f7a
YG
3515static void bnx2x_stats_start(struct bnx2x *bp)
3516{
3517 if (bp->port.pmf)
3518 bnx2x_port_stats_init(bp);
3519
3520 else if (bp->func_stx)
3521 bnx2x_func_stats_init(bp);
3522
3523 bnx2x_hw_stats_post(bp);
3524 bnx2x_storm_stats_post(bp);
3525}
3526
3527static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3528{
3529 bnx2x_stats_comp(bp);
3530 bnx2x_stats_pmf_update(bp);
3531 bnx2x_stats_start(bp);
3532}
3533
3534static void bnx2x_stats_restart(struct bnx2x *bp)
3535{
3536 bnx2x_stats_comp(bp);
3537 bnx2x_stats_start(bp);
3538}
3539
3540static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3541{
3542 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3543 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3544 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3545 struct {
3546 u32 lo;
3547 u32 hi;
3548 } diff;
bb2a0f7a
YG
3549
3550 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3551 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3552 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3553 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3554 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3555 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3556 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3557 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3558 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3559 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3560 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3561 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3562 UPDATE_STAT64(tx_stat_gt127,
3563 tx_stat_etherstatspkts65octetsto127octets);
3564 UPDATE_STAT64(tx_stat_gt255,
3565 tx_stat_etherstatspkts128octetsto255octets);
3566 UPDATE_STAT64(tx_stat_gt511,
3567 tx_stat_etherstatspkts256octetsto511octets);
3568 UPDATE_STAT64(tx_stat_gt1023,
3569 tx_stat_etherstatspkts512octetsto1023octets);
3570 UPDATE_STAT64(tx_stat_gt1518,
3571 tx_stat_etherstatspkts1024octetsto1522octets);
3572 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3573 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3574 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3575 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3576 UPDATE_STAT64(tx_stat_gterr,
3577 tx_stat_dot3statsinternalmactransmiterrors);
3578 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3579
3580 estats->pause_frames_received_hi =
3581 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3582 estats->pause_frames_received_lo =
3583 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3584
3585 estats->pause_frames_sent_hi =
3586 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3587 estats->pause_frames_sent_lo =
3588 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3589}
3590
3591static void bnx2x_emac_stats_update(struct bnx2x *bp)
3592{
3593 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3594 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3595 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3596
3597 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3598 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3599 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3600 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3601 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3602 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3603 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3605 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3606 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3607 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3608 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3609 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3610 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3611 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3612 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3613 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3614 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3615 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3616 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3617 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3618 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3620 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3621 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3622 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3623 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3628
3629 estats->pause_frames_received_hi =
3630 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3631 estats->pause_frames_received_lo =
3632 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3633 ADD_64(estats->pause_frames_received_hi,
3634 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3635 estats->pause_frames_received_lo,
3636 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3637
3638 estats->pause_frames_sent_hi =
3639 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3640 estats->pause_frames_sent_lo =
3641 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3642 ADD_64(estats->pause_frames_sent_hi,
3643 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3644 estats->pause_frames_sent_lo,
3645 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3646}
3647
3648static int bnx2x_hw_stats_update(struct bnx2x *bp)
3649{
3650 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3651 struct nig_stats *old = &(bp->port.old_nig_stats);
3652 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3653 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3654 struct {
3655 u32 lo;
3656 u32 hi;
3657 } diff;
de832a55 3658 u32 nig_timer_max;
bb2a0f7a
YG
3659
3660 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3661 bnx2x_bmac_stats_update(bp);
3662
3663 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3664 bnx2x_emac_stats_update(bp);
3665
3666 else { /* unreached */
3667 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3668 return -1;
3669 }
a2fbb9ea 3670
bb2a0f7a
YG
3671 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3672 new->brb_discard - old->brb_discard);
66e855f3
YG
3673 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3674 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3675
bb2a0f7a
YG
3676 UPDATE_STAT64_NIG(egress_mac_pkt0,
3677 etherstatspkts1024octetsto1522octets);
3678 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3679
bb2a0f7a 3680 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3681
bb2a0f7a
YG
3682 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3683 sizeof(struct mac_stx));
3684 estats->brb_drop_hi = pstats->brb_drop_hi;
3685 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3686
bb2a0f7a 3687 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3688
de832a55
EG
3689 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3690 if (nig_timer_max != estats->nig_timer_max) {
3691 estats->nig_timer_max = nig_timer_max;
3692 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3693 }
3694
bb2a0f7a 3695 return 0;
a2fbb9ea
ET
3696}
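
Editor's note: the UPDATE_STAT64/ADD_EXTEND_64 macros used in the updates above fold a narrower hardware counter delta into a software 64-bit counter kept as a hi/lo pair of u32s, propagating the carry by hand. A hedged standalone sketch of the same carry logic; add_extend_64 here is an illustrative name, not the driver macro:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit delta into a 64-bit counter stored as two u32 halves,
 * propagating the carry manually. */
static void add_extend_64(uint32_t *hi, uint32_t *lo, uint32_t delta)
{
	uint32_t old_lo = *lo;

	*lo += delta;
	if (*lo < old_lo)	/* unsigned wrap means a carry out */
		(*hi)++;
}

int main(void)
{
	uint32_t hi = 0, lo = 0xfffffff0u;

	add_extend_64(&hi, &lo, 0x20);		/* crosses the 32-bit boundary */
	printf("hi=0x%x lo=0x%x\n", hi, lo);	/* hi=0x1 lo=0x10 */
	return 0;
}
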
3697
bb2a0f7a 3698static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3699{
3700 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3701 struct tstorm_per_port_stats *tport =
de832a55 3702 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3703 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3704 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3705 int i;
3706
3707 memset(&(fstats->total_bytes_received_hi), 0,
3708 sizeof(struct host_func_stats) - 2*sizeof(u32));
3709 estats->error_bytes_received_hi = 0;
3710 estats->error_bytes_received_lo = 0;
3711 estats->etherstatsoverrsizepkts_hi = 0;
3712 estats->etherstatsoverrsizepkts_lo = 0;
3713 estats->no_buff_discard_hi = 0;
3714 estats->no_buff_discard_lo = 0;
a2fbb9ea 3715
de832a55
EG
3716 for_each_queue(bp, i) {
3717 struct bnx2x_fastpath *fp = &bp->fp[i];
3718 int cl_id = fp->cl_id;
3719 struct tstorm_per_client_stats *tclient =
3720 &stats->tstorm_common.client_statistics[cl_id];
3721 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3722 struct ustorm_per_client_stats *uclient =
3723 &stats->ustorm_common.client_statistics[cl_id];
3724 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3725 struct xstorm_per_client_stats *xclient =
3726 &stats->xstorm_common.client_statistics[cl_id];
3727 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3728 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3729 u32 diff;
3730
3731 /* are storm stats valid? */
3732 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3733 bp->stats_counter) {
de832a55
EG
3734 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3735 " xstorm counter (%d) != stats_counter (%d)\n",
3736 i, xclient->stats_counter, bp->stats_counter);
3737 return -1;
3738 }
3739 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3740 bp->stats_counter) {
de832a55
EG
3741 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3742 " tstorm counter (%d) != stats_counter (%d)\n",
3743 i, tclient->stats_counter, bp->stats_counter);
3744 return -2;
3745 }
3746 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3747 bp->stats_counter) {
3748 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3749 " ustorm counter (%d) != stats_counter (%d)\n",
3750 i, uclient->stats_counter, bp->stats_counter);
3751 return -4;
3752 }
a2fbb9ea 3753
de832a55
EG
3754 qstats->total_bytes_received_hi =
3755 qstats->valid_bytes_received_hi =
a2fbb9ea 3756 le32_to_cpu(tclient->total_rcv_bytes.hi);
de832a55
EG
3757 qstats->total_bytes_received_lo =
3758 qstats->valid_bytes_received_lo =
a2fbb9ea 3759 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3760
de832a55 3761 qstats->error_bytes_received_hi =
bb2a0f7a 3762 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3763 qstats->error_bytes_received_lo =
bb2a0f7a 3764 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3765
de832a55
EG
3766 ADD_64(qstats->total_bytes_received_hi,
3767 qstats->error_bytes_received_hi,
3768 qstats->total_bytes_received_lo,
3769 qstats->error_bytes_received_lo);
3770
3771 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3772 total_unicast_packets_received);
3773 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3774 total_multicast_packets_received);
3775 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3776 total_broadcast_packets_received);
3777 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3778 etherstatsoverrsizepkts);
3779 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3780
3781 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3782 total_unicast_packets_received);
3783 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3784 total_multicast_packets_received);
3785 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3786 total_broadcast_packets_received);
3787 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3788 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3789 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3790
3791 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3792 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3793 qstats->total_bytes_transmitted_lo =
bb2a0f7a
YG
3794 le32_to_cpu(xclient->total_sent_bytes.lo);
3795
de832a55
EG
3796 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3797 total_unicast_packets_transmitted);
3798 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3799 total_multicast_packets_transmitted);
3800 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3801 total_broadcast_packets_transmitted);
3802
3803 old_tclient->checksum_discard = tclient->checksum_discard;
3804 old_tclient->ttl0_discard = tclient->ttl0_discard;
3805
3806 ADD_64(fstats->total_bytes_received_hi,
3807 qstats->total_bytes_received_hi,
3808 fstats->total_bytes_received_lo,
3809 qstats->total_bytes_received_lo);
3810 ADD_64(fstats->total_bytes_transmitted_hi,
3811 qstats->total_bytes_transmitted_hi,
3812 fstats->total_bytes_transmitted_lo,
3813 qstats->total_bytes_transmitted_lo);
3814 ADD_64(fstats->total_unicast_packets_received_hi,
3815 qstats->total_unicast_packets_received_hi,
3816 fstats->total_unicast_packets_received_lo,
3817 qstats->total_unicast_packets_received_lo);
3818 ADD_64(fstats->total_multicast_packets_received_hi,
3819 qstats->total_multicast_packets_received_hi,
3820 fstats->total_multicast_packets_received_lo,
3821 qstats->total_multicast_packets_received_lo);
3822 ADD_64(fstats->total_broadcast_packets_received_hi,
3823 qstats->total_broadcast_packets_received_hi,
3824 fstats->total_broadcast_packets_received_lo,
3825 qstats->total_broadcast_packets_received_lo);
3826 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3827 qstats->total_unicast_packets_transmitted_hi,
3828 fstats->total_unicast_packets_transmitted_lo,
3829 qstats->total_unicast_packets_transmitted_lo);
3830 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3831 qstats->total_multicast_packets_transmitted_hi,
3832 fstats->total_multicast_packets_transmitted_lo,
3833 qstats->total_multicast_packets_transmitted_lo);
3834 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3835 qstats->total_broadcast_packets_transmitted_hi,
3836 fstats->total_broadcast_packets_transmitted_lo,
3837 qstats->total_broadcast_packets_transmitted_lo);
3838 ADD_64(fstats->valid_bytes_received_hi,
3839 qstats->valid_bytes_received_hi,
3840 fstats->valid_bytes_received_lo,
3841 qstats->valid_bytes_received_lo);
3842
3843 ADD_64(estats->error_bytes_received_hi,
3844 qstats->error_bytes_received_hi,
3845 estats->error_bytes_received_lo,
3846 qstats->error_bytes_received_lo);
3847 ADD_64(estats->etherstatsoverrsizepkts_hi,
3848 qstats->etherstatsoverrsizepkts_hi,
3849 estats->etherstatsoverrsizepkts_lo,
3850 qstats->etherstatsoverrsizepkts_lo);
3851 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3852 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3853 }
3854
3855 ADD_64(fstats->total_bytes_received_hi,
3856 estats->rx_stat_ifhcinbadoctets_hi,
3857 fstats->total_bytes_received_lo,
3858 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3859
3860 memcpy(estats, &(fstats->total_bytes_received_hi),
3861 sizeof(struct host_func_stats) - 2*sizeof(u32));
3862
de832a55
EG
3863 ADD_64(estats->etherstatsoverrsizepkts_hi,
3864 estats->rx_stat_dot3statsframestoolong_hi,
3865 estats->etherstatsoverrsizepkts_lo,
3866 estats->rx_stat_dot3statsframestoolong_lo);
3867 ADD_64(estats->error_bytes_received_hi,
3868 estats->rx_stat_ifhcinbadoctets_hi,
3869 estats->error_bytes_received_lo,
3870 estats->rx_stat_ifhcinbadoctets_lo);
3871
3872 if (bp->port.pmf) {
3873 estats->mac_filter_discard =
3874 le32_to_cpu(tport->mac_filter_discard);
3875 estats->xxoverflow_discard =
3876 le32_to_cpu(tport->xxoverflow_discard);
3877 estats->brb_truncate_discard =
bb2a0f7a 3878 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3879 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3880 }
bb2a0f7a
YG
3881
3882 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3883
de832a55
EG
3884 bp->stats_pending = 0;
3885
a2fbb9ea
ET
3886 return 0;
3887}
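
Editor's note: each per-client block above is accepted only if the storm's post-increment sequence number matches the driver's expected bp->stats_counter, with the comparison truncated to u16 so wraparound is harmless. A minimal sketch of that freshness test in isolation; stats_fresh is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* The storm writes its counter; the driver expects counter + 1 == expected
 * (mod 2^16). Truncating to u16 makes the wrap at 0xffff -> 0 a non-event. */
static int stats_fresh(uint16_t storm_counter, uint16_t expected)
{
	return (uint16_t)(storm_counter + 1) == expected;
}

int main(void)
{
	printf("%d\n", stats_fresh(41, 42));	/* 1: up to date */
	printf("%d\n", stats_fresh(40, 42));	/* 0: stale */
	printf("%d\n", stats_fresh(0xffff, 0));	/* 1: wraps cleanly */
	return 0;
}
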
3888
bb2a0f7a 3889static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3890{
bb2a0f7a 3891 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3892 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3893 int i;
a2fbb9ea
ET
3894
3895 nstats->rx_packets =
3896 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3897 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3898 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3899
3900 nstats->tx_packets =
3901 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3902 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3903 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3904
de832a55 3905 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3906
0e39e645 3907 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3908
de832a55
EG
3909 nstats->rx_dropped = estats->mac_discard;
3910 for_each_queue(bp, i)
3911 nstats->rx_dropped +=
3912 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3913
a2fbb9ea
ET
3914 nstats->tx_dropped = 0;
3915
3916 nstats->multicast =
de832a55 3917 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3918
bb2a0f7a 3919 nstats->collisions =
de832a55 3920 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
3921
3922 nstats->rx_length_errors =
de832a55
EG
3923 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3924 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3925 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3926 bnx2x_hilo(&estats->brb_truncate_hi);
3927 nstats->rx_crc_errors =
3928 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3929 nstats->rx_frame_errors =
3930 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3931 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
3932 nstats->rx_missed_errors = estats->xxoverflow_discard;
3933
3934 nstats->rx_errors = nstats->rx_length_errors +
3935 nstats->rx_over_errors +
3936 nstats->rx_crc_errors +
3937 nstats->rx_frame_errors +
0e39e645
ET
3938 nstats->rx_fifo_errors +
3939 nstats->rx_missed_errors;
a2fbb9ea 3940
bb2a0f7a 3941 nstats->tx_aborted_errors =
de832a55
EG
3942 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3943 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3944 nstats->tx_carrier_errors =
3945 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
3946 nstats->tx_fifo_errors = 0;
3947 nstats->tx_heartbeat_errors = 0;
3948 nstats->tx_window_errors = 0;
3949
3950 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
3951 nstats->tx_carrier_errors +
3952 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3953}
3954
3955static void bnx2x_drv_stats_update(struct bnx2x *bp)
3956{
3957 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3958 int i;
3959
3960 estats->driver_xoff = 0;
3961 estats->rx_err_discard_pkt = 0;
3962 estats->rx_skb_alloc_failed = 0;
3963 estats->hw_csum_err = 0;
3964 for_each_queue(bp, i) {
3965 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3966
3967 estats->driver_xoff += qstats->driver_xoff;
3968 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3969 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3970 estats->hw_csum_err += qstats->hw_csum_err;
3971 }
a2fbb9ea
ET
3972}
3973
bb2a0f7a 3974static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3975{
bb2a0f7a 3976 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3977
bb2a0f7a
YG
3978 if (*stats_comp != DMAE_COMP_VAL)
3979 return;
3980
3981 if (bp->port.pmf)
de832a55 3982 bnx2x_hw_stats_update(bp);
a2fbb9ea 3983
de832a55
EG
3984 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3985 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3986 bnx2x_panic();
3987 return;
a2fbb9ea
ET
3988 }
3989
de832a55
EG
3990 bnx2x_net_stats_update(bp);
3991 bnx2x_drv_stats_update(bp);
3992
a2fbb9ea 3993 if (bp->msglevel & NETIF_MSG_TIMER) {
de832a55
EG
3994 struct tstorm_per_client_stats *old_tclient =
3995 &bp->fp->old_tclient;
3996 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 3997 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3998 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3999 int i;
a2fbb9ea
ET
4000
4001 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4002 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4003 " tx pkt (%lx)\n",
4004 bnx2x_tx_avail(bp->fp),
7a9b2557 4005 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4006 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4007 " rx pkt (%lx)\n",
7a9b2557
VZ
4008 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4009 bp->fp->rx_comp_cons),
4010 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4011 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4012 "brb truncate %u\n",
4013 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4014 qstats->driver_xoff,
4015 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4016 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4017 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4018 "mac_discard %u mac_filter_discard %u "
4019 "xxovrflow_discard %u brb_truncate_discard %u "
4020 "ttl0_discard %u\n",
4781bfad 4021 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4022 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4023 bnx2x_hilo(&qstats->no_buff_discard_hi),
4024 estats->mac_discard, estats->mac_filter_discard,
4025 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4026 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4027
4028 for_each_queue(bp, i) {
4029 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4030 bnx2x_fp(bp, i, tx_pkt),
4031 bnx2x_fp(bp, i, rx_pkt),
4032 bnx2x_fp(bp, i, rx_calls));
4033 }
4034 }
4035
bb2a0f7a
YG
4036 bnx2x_hw_stats_post(bp);
4037 bnx2x_storm_stats_post(bp);
4038}
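
Editor's note: bnx2x_stats_update() runs only when the completion word the DMAE engine writes back equals DMAE_COMP_VAL; the driver arms the handshake by zeroing the word before posting and the hardware sets it when the copy finishes. A toy single-threaded sketch of the same arm/poll pattern; the names and the 0x1 completion value are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define COMP_VAL 0x1	/* illustrative stand-in for DMAE_COMP_VAL */

static volatile uint32_t comp_word;	/* hardware writes here when done */

static void post_copy(void)
{
	comp_word = 0;		/* arm: clear before kicking the engine */
	/* ... start the engine; here we just pretend it completed ... */
	comp_word = COMP_VAL;	/* hw writes the agreed completion value */
}

int main(void)
{
	post_copy();
	if (comp_word != COMP_VAL)
		printf("copy still in flight, skip this update\n");
	else
		printf("copy done, safe to read the stats buffer\n");
	return 0;
}
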
a2fbb9ea 4039
bb2a0f7a
YG
4040static void bnx2x_port_stats_stop(struct bnx2x *bp)
4041{
4042 struct dmae_command *dmae;
4043 u32 opcode;
4044 int loader_idx = PMF_DMAE_C(bp);
4045 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4046
bb2a0f7a 4047 bp->executer_idx = 0;
a2fbb9ea 4048
bb2a0f7a
YG
4049 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4050 DMAE_CMD_C_ENABLE |
4051 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4052#ifdef __BIG_ENDIAN
bb2a0f7a 4053 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4054#else
bb2a0f7a 4055 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4056#endif
bb2a0f7a
YG
4057 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4058 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4059
4060 if (bp->port.port_stx) {
4061
4062 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4063 if (bp->func_stx)
4064 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4065 else
4066 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4067 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4068 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4069 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4070 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4071 dmae->len = sizeof(struct host_port_stats) >> 2;
4072 if (bp->func_stx) {
4073 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4074 dmae->comp_addr_hi = 0;
4075 dmae->comp_val = 1;
4076 } else {
4077 dmae->comp_addr_lo =
4078 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4079 dmae->comp_addr_hi =
4080 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4081 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4082
bb2a0f7a
YG
4083 *stats_comp = 0;
4084 }
a2fbb9ea
ET
4085 }
4086
bb2a0f7a
YG
4087 if (bp->func_stx) {
4088
4089 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4090 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4091 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4092 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4093 dmae->dst_addr_lo = bp->func_stx >> 2;
4094 dmae->dst_addr_hi = 0;
4095 dmae->len = sizeof(struct host_func_stats) >> 2;
4096 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4097 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4098 dmae->comp_val = DMAE_COMP_VAL;
4099
4100 *stats_comp = 0;
a2fbb9ea 4101 }
bb2a0f7a
YG
4102}
4103
4104static void bnx2x_stats_stop(struct bnx2x *bp)
4105{
4106 int update = 0;
4107
4108 bnx2x_stats_comp(bp);
4109
4110 if (bp->port.pmf)
4111 update = (bnx2x_hw_stats_update(bp) == 0);
4112
4113 update |= (bnx2x_storm_stats_update(bp) == 0);
4114
4115 if (update) {
4116 bnx2x_net_stats_update(bp);
a2fbb9ea 4117
bb2a0f7a
YG
4118 if (bp->port.pmf)
4119 bnx2x_port_stats_stop(bp);
4120
4121 bnx2x_hw_stats_post(bp);
4122 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4123 }
4124}
4125
bb2a0f7a
YG
4126static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4127{
4128}
4129
4130static const struct {
4131 void (*action)(struct bnx2x *bp);
4132 enum bnx2x_stats_state next_state;
4133} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4134/* state event */
4135{
4136/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4137/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4138/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4139/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4140},
4141{
4142/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4143/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4144/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4145/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4146}
4147};
4148
4149static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4150{
4151 enum bnx2x_stats_state state = bp->stats_state;
4152
4153 bnx2x_stats_stm[state][event].action(bp);
4154 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4155
4156 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4157 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4158 state, event, bp->stats_state);
4159}
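
Editor's note: bnx2x_stats_stm[][] above is a classic table-driven state machine: the current state and the event index straight into an {action, next_state} cell. A self-contained miniature of the same dispatch shape, with two states and two events; all names are illustrative:

#include <stdio.h>

enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum event { EV_LINK_UP, EV_STOP, EV_MAX };

static void do_start(void)   { printf("start stats\n"); }
static void do_stop(void)    { printf("stop stats\n"); }
static void do_nothing(void) { }

/* One {action, next_state} cell per (state, event) pair,
 * mirroring bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX]. */
static const struct {
	void (*action)(void);
	enum state next_state;
} stm[ST_MAX][EV_MAX] = {
	[ST_DISABLED] = {
		[EV_LINK_UP] = { do_start,   ST_ENABLED },
		[EV_STOP]    = { do_nothing, ST_DISABLED },
	},
	[ST_ENABLED] = {
		[EV_LINK_UP] = { do_nothing, ST_ENABLED },
		[EV_STOP]    = { do_stop,    ST_DISABLED },
	},
};

int main(void)
{
	enum state s = ST_DISABLED;

	stm[s][EV_LINK_UP].action();
	s = stm[s][EV_LINK_UP].next_state;	/* -> ST_ENABLED */
	stm[s][EV_STOP].action();
	s = stm[s][EV_STOP].next_state;		/* -> ST_DISABLED */
	return 0;
}
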
4160
a2fbb9ea
ET
4161static void bnx2x_timer(unsigned long data)
4162{
4163 struct bnx2x *bp = (struct bnx2x *) data;
4164
4165 if (!netif_running(bp->dev))
4166 return;
4167
4168 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4169 goto timer_restart;
a2fbb9ea
ET
4170
4171 if (poll) {
4172 struct bnx2x_fastpath *fp = &bp->fp[0];
4173 int rc;
4174
4175 bnx2x_tx_int(fp, 1000);
4176 rc = bnx2x_rx_int(fp, 1000);
4177 }
4178
34f80b04
EG
4179 if (!BP_NOMCP(bp)) {
4180 int func = BP_FUNC(bp);
a2fbb9ea
ET
4181 u32 drv_pulse;
4182 u32 mcp_pulse;
4183
4184 ++bp->fw_drv_pulse_wr_seq;
4185 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4186 /* TBD - add SYSTEM_TIME */
4187 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4188 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4189
34f80b04 4190 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4191 MCP_PULSE_SEQ_MASK);
4192 /* The delta between driver pulse and mcp response
4193 * should be 1 (before mcp response) or 0 (after mcp response)
4194 */
4195 if ((drv_pulse != mcp_pulse) &&
4196 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4197 /* someone lost a heartbeat... */
4198 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4199 drv_pulse, mcp_pulse);
4200 }
4201 }
4202
bb2a0f7a
YG
4203 if ((bp->state == BNX2X_STATE_OPEN) ||
4204 (bp->state == BNX2X_STATE_DISABLED))
4205 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4206
f1410647 4207timer_restart:
a2fbb9ea
ET
4208 mod_timer(&bp->timer, jiffies + bp->current_interval);
4209}
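
Editor's note: the pulse check in bnx2x_timer() tolerates exactly one step of skew between the driver's sequence number and the management CPU's echo, with both compared modulo the pulse mask. A hedged sketch of that test on its own; pulse_ok and the 15-bit mask are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK 0x7fff	/* illustrative stand-in for MCP_PULSE_SEQ_MASK */

/* Healthy iff the MCP echoed our current sequence number (delta 0)
 * or is exactly one behind us (delta 1, reply still in flight). */
static int pulse_ok(uint32_t drv_pulse, uint32_t mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & SEQ_MASK));
}

int main(void)
{
	printf("%d\n", pulse_ok(5, 5));		/* 1 */
	printf("%d\n", pulse_ok(6, 5));		/* 1 */
	printf("%d\n", pulse_ok(0, SEQ_MASK));	/* 1: wrap case */
	printf("%d\n", pulse_ok(7, 5));		/* 0: heartbeat lost */
	return 0;
}
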
4210
4211/* end of Statistics */
4212
4213/* nic init */
4214
4215/*
4216 * nic init service functions
4217 */
4218
34f80b04 4219static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4220{
34f80b04
EG
4221 int port = BP_PORT(bp);
4222
4223 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4224 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4225 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4226 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4227 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4228 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4229}
4230
5c862848
EG
4231static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4232 dma_addr_t mapping, int sb_id)
34f80b04
EG
4233{
4234 int port = BP_PORT(bp);
bb2a0f7a 4235 int func = BP_FUNC(bp);
a2fbb9ea 4236 int index;
34f80b04 4237 u64 section;
a2fbb9ea
ET
4238
4239 /* USTORM */
4240 section = ((u64)mapping) + offsetof(struct host_status_block,
4241 u_status_block);
34f80b04 4242 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4243
4244 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4245 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4246 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4247 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4248 U64_HI(section));
bb2a0f7a
YG
4249 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4250 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4251
4252 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4253 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4254 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4255
4256 /* CSTORM */
4257 section = ((u64)mapping) + offsetof(struct host_status_block,
4258 c_status_block);
34f80b04 4259 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4260
4261 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4262 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4263 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4264 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4265 U64_HI(section));
7a9b2557
VZ
4266 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4267 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4268
4269 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4270 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4271 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4272
4273 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4274}
4275
4276static void bnx2x_zero_def_sb(struct bnx2x *bp)
4277{
4278 int func = BP_FUNC(bp);
a2fbb9ea 4279
34f80b04
EG
4280 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4281 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4282 sizeof(struct ustorm_def_status_block)/4);
4283 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4284 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4285 sizeof(struct cstorm_def_status_block)/4);
4286 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4287 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4288 sizeof(struct xstorm_def_status_block)/4);
4289 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4290 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4291 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4292}
4293
4294static void bnx2x_init_def_sb(struct bnx2x *bp,
4295 struct host_def_status_block *def_sb,
34f80b04 4296 dma_addr_t mapping, int sb_id)
a2fbb9ea 4297{
34f80b04
EG
4298 int port = BP_PORT(bp);
4299 int func = BP_FUNC(bp);
a2fbb9ea
ET
4300 int index, val, reg_offset;
4301 u64 section;
4302
4303 /* ATTN */
4304 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4305 atten_status_block);
34f80b04 4306 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4307
49d66772
ET
4308 bp->attn_state = 0;
4309
a2fbb9ea
ET
4310 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4311 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4312
34f80b04 4313 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4314 bp->attn_group[index].sig[0] = REG_RD(bp,
4315 reg_offset + 0x10*index);
4316 bp->attn_group[index].sig[1] = REG_RD(bp,
4317 reg_offset + 0x4 + 0x10*index);
4318 bp->attn_group[index].sig[2] = REG_RD(bp,
4319 reg_offset + 0x8 + 0x10*index);
4320 bp->attn_group[index].sig[3] = REG_RD(bp,
4321 reg_offset + 0xc + 0x10*index);
4322 }
4323
a2fbb9ea
ET
4324 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4325 HC_REG_ATTN_MSG0_ADDR_L);
4326
4327 REG_WR(bp, reg_offset, U64_LO(section));
4328 REG_WR(bp, reg_offset + 4, U64_HI(section));
4329
4330 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4331
4332 val = REG_RD(bp, reg_offset);
34f80b04 4333 val |= sb_id;
a2fbb9ea
ET
4334 REG_WR(bp, reg_offset, val);
4335
4336 /* USTORM */
4337 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4338 u_def_status_block);
34f80b04 4339 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4340
4341 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4342 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4343 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4344 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4345 U64_HI(section));
5c862848 4346 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4347 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4348
4349 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4350 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4351 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4352
4353 /* CSTORM */
4354 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4355 c_def_status_block);
34f80b04 4356 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4357
4358 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4359 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4360 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4361 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4362 U64_HI(section));
5c862848 4363 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4364 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4365
4366 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4367 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4368 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4369
4370 /* TSTORM */
4371 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4372 t_def_status_block);
34f80b04 4373 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4374
4375 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4376 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4377 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4378 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4379 U64_HI(section));
5c862848 4380 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4381 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4382
4383 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4384 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4385 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4386
4387 /* XSTORM */
4388 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4389 x_def_status_block);
34f80b04 4390 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4391
4392 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4393 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4394 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4395 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4396 U64_HI(section));
5c862848 4397 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4398 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4399
4400 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4401 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4402 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4403
bb2a0f7a 4404 bp->stats_pending = 0;
66e855f3 4405 bp->set_mac_pending = 0;
bb2a0f7a 4406
34f80b04 4407 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4408}
4409
4410static void bnx2x_update_coalesce(struct bnx2x *bp)
4411{
34f80b04 4412 int port = BP_PORT(bp);
a2fbb9ea
ET
4413 int i;
4414
4415 for_each_queue(bp, i) {
34f80b04 4416 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4417
4418 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4419 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4420 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4421 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4422 bp->rx_ticks/12);
a2fbb9ea 4423 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4424 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4425 U_SB_ETH_RX_CQ_INDEX),
4426 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4427
4428 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4429 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4430 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4431 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4432 bp->tx_ticks/12);
a2fbb9ea 4433 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4434 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4435 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4436 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4437 }
4438}
4439
7a9b2557
VZ
4440static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4441 struct bnx2x_fastpath *fp, int last)
4442{
4443 int i;
4444
4445 for (i = 0; i < last; i++) {
4446 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4447 struct sk_buff *skb = rx_buf->skb;
4448
4449 if (skb == NULL) {
4450 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4451 continue;
4452 }
4453
4454 if (fp->tpa_state[i] == BNX2X_TPA_START)
4455 pci_unmap_single(bp->pdev,
4456 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4457 bp->rx_buf_size,
7a9b2557
VZ
4458 PCI_DMA_FROMDEVICE);
4459
4460 dev_kfree_skb(skb);
4461 rx_buf->skb = NULL;
4462 }
4463}
4464
a2fbb9ea
ET
4465static void bnx2x_init_rx_rings(struct bnx2x *bp)
4466{
7a9b2557 4467 int func = BP_FUNC(bp);
32626230
EG
4468 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4469 ETH_MAX_AGGREGATION_QUEUES_E1H;
4470 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4471 int i, j;
a2fbb9ea 4472
87942b46 4473 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4474 DP(NETIF_MSG_IFUP,
4475 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4476
7a9b2557 4477 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4478
555f6c78 4479 for_each_rx_queue(bp, j) {
32626230 4480 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4481
32626230 4482 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4483 fp->tpa_pool[i].skb =
4484 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4485 if (!fp->tpa_pool[i].skb) {
4486 BNX2X_ERR("Failed to allocate TPA "
4487 "skb pool for queue[%d] - "
4488 "disabling TPA on this "
4489 "queue!\n", j);
4490 bnx2x_free_tpa_pool(bp, fp, i);
4491 fp->disable_tpa = 1;
4492 break;
4493 }
4494 pci_unmap_addr_set((struct sw_rx_bd *)
4495 &bp->fp->tpa_pool[i],
4496 mapping, 0);
4497 fp->tpa_state[i] = BNX2X_TPA_STOP;
4498 }
4499 }
4500 }
4501
555f6c78 4502 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4503 struct bnx2x_fastpath *fp = &bp->fp[j];
4504
4505 fp->rx_bd_cons = 0;
4506 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4507 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4508
4509 /* "next page" elements initialization */
4510 /* SGE ring */
4511 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4512 struct eth_rx_sge *sge;
4513
4514 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4515 sge->addr_hi =
4516 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4517 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4518 sge->addr_lo =
4519 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521 }
4522
4523 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4524
7a9b2557 4525 /* RX BD ring */
a2fbb9ea
ET
4526 for (i = 1; i <= NUM_RX_RINGS; i++) {
4527 struct eth_rx_bd *rx_bd;
4528
4529 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4530 rx_bd->addr_hi =
4531 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4532 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4533 rx_bd->addr_lo =
4534 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4535 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4536 }
4537
34f80b04 4538 /* CQ ring */
a2fbb9ea
ET
4539 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4540 struct eth_rx_cqe_next_page *nextpg;
4541
4542 nextpg = (struct eth_rx_cqe_next_page *)
4543 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4544 nextpg->addr_hi =
4545 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4546 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4547 nextpg->addr_lo =
4548 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4549 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4550 }
4551
7a9b2557
VZ
4552 /* Allocate SGEs and initialize the ring elements */
4553 for (i = 0, ring_prod = 0;
4554 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4555
7a9b2557
VZ
4556 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4557 BNX2X_ERR("was only able to allocate "
4558 "%d rx sges\n", i);
4559 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4560 /* Cleanup already allocated elements */
4561 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4562 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4563 fp->disable_tpa = 1;
4564 ring_prod = 0;
4565 break;
4566 }
4567 ring_prod = NEXT_SGE_IDX(ring_prod);
4568 }
4569 fp->rx_sge_prod = ring_prod;
4570
4571 /* Allocate BDs and initialize BD ring */
66e855f3 4572 fp->rx_comp_cons = 0;
7a9b2557 4573 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4574 for (i = 0; i < bp->rx_ring_size; i++) {
4575 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4576 BNX2X_ERR("was only able to allocate "
de832a55
EG
4577 "%d rx skbs on queue[%d]\n", i, j);
4578 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4579 break;
4580 }
4581 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4582 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4583 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4584 }
4585
7a9b2557
VZ
4586 fp->rx_bd_prod = ring_prod;
4587 /* must not have more available CQEs than BDs */
4588 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4589 cqe_ring_prod);
a2fbb9ea
ET
4590 fp->rx_pkt = fp->rx_calls = 0;
4591
7a9b2557
VZ
4592 /* Warning!
4593 * This will generate an interrupt (to the TSTORM);
4594 * it must only be done after the chip is initialized.
4595 */
4596 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4597 fp->rx_sge_prod);
a2fbb9ea
ET
4598 if (j != 0)
4599 continue;
4600
4601 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4602 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4603 U64_LO(fp->rx_comp_mapping));
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4605 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4606 U64_HI(fp->rx_comp_mapping));
4607 }
4608}
4609
4610static void bnx2x_init_tx_ring(struct bnx2x *bp)
4611{
4612 int i, j;
4613
555f6c78 4614 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4615 struct bnx2x_fastpath *fp = &bp->fp[j];
4616
4617 for (i = 1; i <= NUM_TX_RINGS; i++) {
4618 struct eth_tx_bd *tx_bd =
4619 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4620
4621 tx_bd->addr_hi =
4622 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4623 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4624 tx_bd->addr_lo =
4625 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4626 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4627 }
4628
4629 fp->tx_pkt_prod = 0;
4630 fp->tx_pkt_cons = 0;
4631 fp->tx_bd_prod = 0;
4632 fp->tx_bd_cons = 0;
4633 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4634 fp->tx_pkt = 0;
4635 }
4636}
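
Editor's note: in the TX loop above, and in the RX loops before it, the last one or two descriptors of every page are reserved as "next page" pointers, so the N physical pages behave as one circular ring. A small sketch of the linking arithmetic under those assumptions; the structure names are illustrative, not the driver's eth_tx_bd:

#include <stdint.h>
#include <stdio.h>

#define PAGES     3
#define DESC_CNT  4	/* descriptors per page; the last one is the link */

struct link_bd { uint32_t addr_hi, addr_lo; };

static struct link_bd ring[PAGES * DESC_CNT];

int main(void)
{
	uint64_t base = 0x100000;	/* pretend DMA base of page 0 */
	int i;

	/* Point the final descriptor of page i-1 at page (i % PAGES),
	 * exactly as the "next page" elements initialization does. */
	for (i = 1; i <= PAGES; i++) {
		struct link_bd *bd = &ring[DESC_CNT * i - 1];
		uint64_t next = base + 4096ull * (i % PAGES);

		bd->addr_hi = (uint32_t)(next >> 32);
		bd->addr_lo = (uint32_t)next;
	}

	for (i = 1; i <= PAGES; i++)
		printf("page %d links to 0x%x\n", i - 1,
		       ring[DESC_CNT * i - 1].addr_lo);
	return 0;	/* last page links back to page 0 */
}
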
4637
4638static void bnx2x_init_sp_ring(struct bnx2x *bp)
4639{
34f80b04 4640 int func = BP_FUNC(bp);
a2fbb9ea
ET
4641
4642 spin_lock_init(&bp->spq_lock);
4643
4644 bp->spq_left = MAX_SPQ_PENDING;
4645 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4646 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4647 bp->spq_prod_bd = bp->spq;
4648 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4649
34f80b04 4650 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4651 U64_LO(bp->spq_mapping));
34f80b04
EG
4652 REG_WR(bp,
4653 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4654 U64_HI(bp->spq_mapping));
4655
34f80b04 4656 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4657 bp->spq_prod_idx);
4658}
4659
4660static void bnx2x_init_context(struct bnx2x *bp)
4661{
4662 int i;
4663
4664 for_each_queue(bp, i) {
4665 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4666 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4667 u8 cl_id = fp->cl_id;
0626b899 4668 u8 sb_id = fp->sb_id;
a2fbb9ea 4669
34f80b04
EG
4670 context->ustorm_st_context.common.sb_index_numbers =
4671 BNX2X_RX_SB_INDEX_NUM;
0626b899 4672 context->ustorm_st_context.common.clientId = cl_id;
34f80b04
EG
4673 context->ustorm_st_context.common.status_block_id = sb_id;
4674 context->ustorm_st_context.common.flags =
de832a55
EG
4675 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4676 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4677 context->ustorm_st_context.common.statistics_counter_id =
4678 cl_id;
8d9c5f34 4679 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4680 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4681 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4682 bp->rx_buf_size;
34f80b04 4683 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4684 U64_HI(fp->rx_desc_mapping);
34f80b04 4685 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4686 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4687 if (!fp->disable_tpa) {
4688 context->ustorm_st_context.common.flags |=
4689 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4690 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4691 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4692 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4693 (u32)0xffff);
7a9b2557
VZ
4694 context->ustorm_st_context.common.sge_page_base_hi =
4695 U64_HI(fp->rx_sge_mapping);
4696 context->ustorm_st_context.common.sge_page_base_lo =
4697 U64_LO(fp->rx_sge_mapping);
4698 }
4699
8d9c5f34
EG
4700 context->ustorm_ag_context.cdu_usage =
4701 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4702 CDU_REGION_NUMBER_UCM_AG,
4703 ETH_CONNECTION_TYPE);
4704
4705 context->xstorm_st_context.tx_bd_page_base_hi =
4706 U64_HI(fp->tx_desc_mapping);
4707 context->xstorm_st_context.tx_bd_page_base_lo =
4708 U64_LO(fp->tx_desc_mapping);
4709 context->xstorm_st_context.db_data_addr_hi =
4710 U64_HI(fp->tx_prods_mapping);
4711 context->xstorm_st_context.db_data_addr_lo =
4712 U64_LO(fp->tx_prods_mapping);
0626b899 4713 context->xstorm_st_context.statistics_data = (cl_id |
8d9c5f34 4714 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4715 context->cstorm_st_context.sb_index_number =
5c862848 4716 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4717 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4718
4719 context->xstorm_ag_context.cdu_reserved =
4720 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4721 CDU_REGION_NUMBER_XCM_AG,
4722 ETH_CONNECTION_TYPE);
a2fbb9ea
ET
4723 }
4724}
4725
4726static void bnx2x_init_ind_table(struct bnx2x *bp)
4727{
26c8fa4d 4728 int func = BP_FUNC(bp);
a2fbb9ea
ET
4729 int i;
4730
555f6c78 4731 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4732 return;
4733
555f6c78
EG
4734 DP(NETIF_MSG_IFUP,
4735 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4736 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4737 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4738 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4739 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
4740}
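
Editor's note: the indirection table fill above spreads the TSTORM_INDIRECTION_TABLE_SIZE slots across the RX queues round-robin, offset by the leading client id. A standalone sketch of the same distribution; the table size of 8 and the variable names are illustrative:

#include <stdio.h>

#define TABLE_SIZE 8	/* real table is TSTORM_INDIRECTION_TABLE_SIZE */

int main(void)
{
	int base_cl_id = 4;	/* pretend leading client id */
	int num_rx_queues = 3;
	int table[TABLE_SIZE];
	int i;

	/* Slot i maps to client (base + i mod nqueues), just like
	 * bp->fp->cl_id + (i % bp->num_rx_queues) above. */
	for (i = 0; i < TABLE_SIZE; i++)
		table[i] = base_cl_id + (i % num_rx_queues);

	for (i = 0; i < TABLE_SIZE; i++)
		printf("slot %d -> client %d\n", i, table[i]);
	return 0;
}
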
4741
49d66772
ET
4742static void bnx2x_set_client_config(struct bnx2x *bp)
4743{
49d66772 4744 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4745 int port = BP_PORT(bp);
4746 int i;
49d66772 4747
e7799c5f 4748 tstorm_client.mtu = bp->dev->mtu;
49d66772 4749 tstorm_client.config_flags =
de832a55
EG
4750 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4751 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4752#ifdef BCM_VLAN
0c6671b0 4753 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4754 tstorm_client.config_flags |=
8d9c5f34 4755 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4756 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4757 }
4758#endif
49d66772 4759
7a9b2557
VZ
4760 if (bp->flags & TPA_ENABLE_FLAG) {
4761 tstorm_client.max_sges_for_packet =
4f40f2cb 4762 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4763 tstorm_client.max_sges_for_packet =
4764 ((tstorm_client.max_sges_for_packet +
4765 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4766 PAGES_PER_SGE_SHIFT;
4767
4768 tstorm_client.config_flags |=
4769 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4770 }
4771
49d66772 4772 for_each_queue(bp, i) {
de832a55
EG
4773 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4774
49d66772 4775 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4776 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4777 ((u32 *)&tstorm_client)[0]);
4778 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4779 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4780 ((u32 *)&tstorm_client)[1]);
4781 }
4782
34f80b04
EG
4783 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4784 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4785}
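
Editor's note: the max_sges_for_packet computation above first counts SGE pages per MTU-sized frame, then rounds that count up to a whole aggregation unit of PAGES_PER_SGE pages. A worked standalone version of the same two steps; the 4 KB page and the two-page unit are assumed values for illustration only:

#include <stdio.h>

#define SGE_PAGE_SHIFT      12	/* assume 4 KB SGE pages */
#define PAGES_PER_SGE_SHIFT 1	/* assume 2 pages per SGE */
#define PAGES_PER_SGE       (1 << PAGES_PER_SGE_SHIFT)

int main(void)
{
	unsigned int mtu = 9000;	/* jumbo frame example */

	/* pages needed for one frame: MTU rounded up to a page */
	unsigned int sges = (mtu + (1 << SGE_PAGE_SHIFT) - 1)
			    >> SGE_PAGE_SHIFT;

	/* round up to a multiple of PAGES_PER_SGE, then convert the
	 * page count to an SGE count, as the driver expression does */
	sges = ((sges + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1))
	       >> PAGES_PER_SGE_SHIFT;

	printf("mtu %u -> %u sges per packet\n", mtu, sges);	/* 9000 -> 2 */
	return 0;
}
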
4786
a2fbb9ea
ET
4787static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4788{
a2fbb9ea 4789 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4790 int mode = bp->rx_mode;
4791 int mask = (1 << BP_L_ID(bp));
4792 int func = BP_FUNC(bp);
a2fbb9ea
ET
4793 int i;
4794
3196a88a 4795 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4796
4797 switch (mode) {
4798 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4799 tstorm_mac_filter.ucast_drop_all = mask;
4800 tstorm_mac_filter.mcast_drop_all = mask;
4801 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4802 break;
4803 case BNX2X_RX_MODE_NORMAL:
34f80b04 4804 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4805 break;
4806 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4807 tstorm_mac_filter.mcast_accept_all = mask;
4808 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4809 break;
4810 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4811 tstorm_mac_filter.ucast_accept_all = mask;
4812 tstorm_mac_filter.mcast_accept_all = mask;
4813 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4814 break;
4815 default:
34f80b04
EG
4816 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4817 break;
a2fbb9ea
ET
4818 }
4819
4820 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4821 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4822 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4823 ((u32 *)&tstorm_mac_filter)[i]);
4824
34f80b04 4825/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4826 ((u32 *)&tstorm_mac_filter)[i]); */
4827 }
a2fbb9ea 4828
49d66772
ET
4829 if (mode != BNX2X_RX_MODE_NONE)
4830 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4831}
4832
471de716
EG
4833static void bnx2x_init_internal_common(struct bnx2x *bp)
4834{
4835 int i;
4836
3cdf1db7
YG
4837 if (bp->flags & TPA_ENABLE_FLAG) {
4838 struct tstorm_eth_tpa_exist tpa = {0};
4839
4840 tpa.tpa_exist = 1;
4841
4842 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4843 ((u32 *)&tpa)[0]);
4844 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4845 ((u32 *)&tpa)[1]);
4846 }
4847
471de716
EG
4848 /* Zero this manually as its initialization is
4849 currently missing in the initTool */
4850 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4851 REG_WR(bp, BAR_USTRORM_INTMEM +
4852 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4853}
4854
4855static void bnx2x_init_internal_port(struct bnx2x *bp)
4856{
4857 int port = BP_PORT(bp);
4858
4859 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4860 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4861 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4862 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4863}
4864
8a1c38d1
EG
4865/* Calculates the sum of vn_min_rates.
4866 It's needed for further normalizing of the min_rates.
4867 Returns:
4868 sum of vn_min_rates.
4869 or
4870 0 - if all the min_rates are 0.
4871 In the latter case the fairness algorithm should be deactivated.
4872 If not all min_rates are zero then those that are zeroes will be set to 1.
4873 */
4874static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4875{
4876 int all_zero = 1;
4877 int port = BP_PORT(bp);
4878 int vn;
4879
4880 bp->vn_weight_sum = 0;
4881 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4882 int func = 2*vn + port;
4883 u32 vn_cfg =
4884 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4885 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4886 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4887
4888 /* Skip hidden vns */
4889 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4890 continue;
4891
4892 /* If min rate is zero - set it to 1 */
4893 if (!vn_min_rate)
4894 vn_min_rate = DEF_MIN_RATE;
4895 else
4896 all_zero = 0;
4897
4898 bp->vn_weight_sum += vn_min_rate;
4899 }
4900
4901 /* ... only if all min rates are zeros - disable fairness */
4902 if (all_zero)
4903 bp->vn_weight_sum = 0;
4904}
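
Editor's note: bnx2x_calc_vn_weight_sum() above gives every non-hidden VN at least DEF_MIN_RATE so the fairness weights stay nonzero, and reports a zero sum only when all configured minimums were zero. A compact standalone rendering of that policy; the DEF_MIN_RATE value of 100 and the array form are illustrative assumptions (the hidden-VN skip is omitted):

#include <stdio.h>

#define DEF_MIN_RATE 100	/* assumed floor, mirroring the driver's */

/* Returns the sum of min rates, or 0 if every VN asked for 0
 * (the caller then disables the fairness algorithm entirely). */
static int vn_weight_sum(const int *min_rate, int nvn)
{
	int sum = 0, all_zero = 1, vn;

	for (vn = 0; vn < nvn; vn++) {
		int rate = min_rate[vn];

		if (!rate)
			rate = DEF_MIN_RATE;	/* zero -> floor */
		else
			all_zero = 0;
		sum += rate;
	}
	return all_zero ? 0 : sum;
}

int main(void)
{
	int a[] = { 0, 2500, 0, 5000 };
	int b[] = { 0, 0, 0, 0 };

	printf("sum=%d\n", vn_weight_sum(a, 4));	/* 7700 */
	printf("sum=%d\n", vn_weight_sum(b, 4));	/* 0: fairness off */
	return 0;
}
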
4905
471de716 4906static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4907{
a2fbb9ea
ET
4908 struct tstorm_eth_function_common_config tstorm_config = {0};
4909 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4910 int port = BP_PORT(bp);
4911 int func = BP_FUNC(bp);
de832a55
EG
4912 int i, j;
4913 u32 offset;
471de716 4914 u16 max_agg_size;
a2fbb9ea
ET
4915
4916 if (is_multi(bp)) {
555f6c78 4917 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4918 tstorm_config.rss_result_mask = MULTI_MASK;
4919 }
8d9c5f34
EG
4920 if (IS_E1HMF(bp))
4921 tstorm_config.config_flags |=
4922 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4923
34f80b04
EG
4924 tstorm_config.leading_client_id = BP_L_ID(bp);
4925
a2fbb9ea 4926 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4927 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4928 (*(u32 *)&tstorm_config));
4929
c14423fe 4930 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4931 bnx2x_set_storm_rx_mode(bp);
4932
de832a55
EG
4933 for_each_queue(bp, i) {
4934 u8 cl_id = bp->fp[i].cl_id;
4935
4936 /* reset xstorm per client statistics */
4937 offset = BAR_XSTRORM_INTMEM +
4938 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4939 for (j = 0;
4940 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4941 REG_WR(bp, offset + j*4, 0);
4942
4943 /* reset tstorm per client statistics */
4944 offset = BAR_TSTRORM_INTMEM +
4945 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946 for (j = 0;
4947 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4949
4950 /* reset ustorm per client statistics */
4951 offset = BAR_USTRORM_INTMEM +
4952 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 for (j = 0;
4954 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4956 }
4957
4958 /* Init statistics related context */
34f80b04 4959 stats_flags.collect_eth = 1;
a2fbb9ea 4960
66e855f3 4961 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4962 ((u32 *)&stats_flags)[0]);
66e855f3 4963 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4964 ((u32 *)&stats_flags)[1]);
4965
66e855f3 4966 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4967 ((u32 *)&stats_flags)[0]);
66e855f3 4968 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4969 ((u32 *)&stats_flags)[1]);
4970
4971 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4972 ((u32 *)&stats_flags)[0]);
4973 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4974 ((u32 *)&stats_flags)[1]);
4975
66e855f3 4976 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4977 ((u32 *)&stats_flags)[0]);
66e855f3 4978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4979 ((u32 *)&stats_flags)[1]);
4980
4981 REG_WR(bp, BAR_XSTRORM_INTMEM +
4982 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4983 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4984 REG_WR(bp, BAR_XSTRORM_INTMEM +
4985 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4986 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4987
4988 REG_WR(bp, BAR_TSTRORM_INTMEM +
4989 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_TSTRORM_INTMEM +
4992 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4994
4995 REG_WR(bp, BAR_USTRORM_INTMEM +
4996 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_USTRORM_INTMEM +
4999 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002 if (CHIP_IS_E1H(bp)) {
5003 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5004 IS_E1HMF(bp));
5005 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5006 IS_E1HMF(bp));
5007 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5008 IS_E1HMF(bp));
5009 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5010 IS_E1HMF(bp));
5011
5012 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5013 bp->e1hov);
5014 }
5015
5016 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5017 max_agg_size =
5018 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5019 SGE_PAGE_SIZE * PAGES_PER_SGE),
5020 (u32)0xffff);
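 /* Note on the clamp above: max_agg_size is written with REG_WR16
 into a 16-bit location below, so the frags * page-size product
 is saturated at 0xffff rather than being allowed to wrap. */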
555f6c78 5021 for_each_rx_queue(bp, i) {
7a9b2557 5022 struct bnx2x_fastpath *fp = &bp->fp[i];
5023
5024 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5025 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5026 U64_LO(fp->rx_comp_mapping));
5027 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5028 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5029 U64_HI(fp->rx_comp_mapping));
5030
7a9b2557 5031 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5032 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5033 max_agg_size);
5034 }
8a1c38d1 5035
5036 /* dropless flow control */
5037 if (CHIP_IS_E1H(bp)) {
5038 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5039
5040 rx_pause.bd_thr_low = 250;
5041 rx_pause.cqe_thr_low = 250;
5042 rx_pause.cos = 1;
5043 rx_pause.sge_thr_low = 0;
5044 rx_pause.bd_thr_high = 350;
5045 rx_pause.cqe_thr_high = 350;
5046 rx_pause.sge_thr_high = 0;
5047
5048 for_each_rx_queue(bp, i) {
5049 struct bnx2x_fastpath *fp = &bp->fp[i];
5050
5051 if (!fp->disable_tpa) {
5052 rx_pause.sge_thr_low = 150;
5053 rx_pause.sge_thr_high = 250;
5054 }
5055
5056
5057 offset = BAR_USTRORM_INTMEM +
5058 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5059 fp->cl_id);
5060 for (j = 0;
5061 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5062 j++)
5063 REG_WR(bp, offset + j*4,
5064 ((u32 *)&rx_pause)[j]);
5065 }
5066 }
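 /* The bd/cqe values above serve as low/high watermarks (in
 ring-entry units) from which the FW decides when to request
 pause; the SGE thresholds stay 0 (unused) unless the queue
 does TPA, hence the per-queue override above. */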
5067
5068 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5069
5070 /* Init rate shaping and fairness contexts */
5071 if (IS_E1HMF(bp)) {
5072 int vn;
5073
5074 /* During init there is no active link
5075 Until link is up, set link rate to 10Gbps */
5076 bp->link_vars.line_speed = SPEED_10000;
5077 bnx2x_init_port_minmax(bp);
5078
5079 bnx2x_calc_vn_weight_sum(bp);
5080
5081 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5082 bnx2x_init_vn_minmax(bp, 2*vn + port);
5083
5084 /* Enable rate shaping and fairness */
5085 bp->cmng.flags.cmng_enables =
5086 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5087 if (bp->vn_weight_sum)
5088 bp->cmng.flags.cmng_enables |=
5089 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5090 else
5091 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5092 " fairness will be disabled\n");
5093 } else {
5094 /* rate shaping and fairness are disabled */
5095 DP(NETIF_MSG_IFUP,
5096 "single function mode minmax will be disabled\n");
5097 }
5098
5099
5100 /* Store it to internal memory */
5101 if (bp->port.pmf)
5102 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5103 REG_WR(bp, BAR_XSTRORM_INTMEM +
5104 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5105 ((u32 *)(&bp->cmng))[i]);
5106}
5107
5108static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5109{
5110 switch (load_code) {
5111 case FW_MSG_CODE_DRV_LOAD_COMMON:
5112 bnx2x_init_internal_common(bp);
5113 /* no break */
5114
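 /* deliberate fallthrough: a COMMON load also runs the PORT and
 FUNCTION stages below, and a PORT load also runs the FUNCTION
 stage, matching the MCP load-code hierarchy */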
5115 case FW_MSG_CODE_DRV_LOAD_PORT:
5116 bnx2x_init_internal_port(bp);
5117 /* no break */
5118
5119 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5120 bnx2x_init_internal_func(bp);
5121 break;
5122
5123 default:
5124 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5125 break;
5126 }
5127}
5128
5129static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5130{
5131 int i;
5132
5133 for_each_queue(bp, i) {
5134 struct bnx2x_fastpath *fp = &bp->fp[i];
5135
34f80b04 5136 fp->bp = bp;
a2fbb9ea 5137 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5138 fp->index = i;
5139 fp->cl_id = BP_L_ID(bp) + i;
5140 fp->sb_id = fp->cl_id;
5141 DP(NETIF_MSG_IFUP,
5142 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
0626b899 5143 bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
5c862848 5144 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5145 fp->sb_id);
5c862848 5146 bnx2x_update_fpsb_idx(fp);
5147 }
5148
5149 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5150 DEF_SB_ID);
5151 bnx2x_update_dsb_idx(bp);
5152 bnx2x_update_coalesce(bp);
5153 bnx2x_init_rx_rings(bp);
5154 bnx2x_init_tx_ring(bp);
5155 bnx2x_init_sp_ring(bp);
5156 bnx2x_init_context(bp);
471de716 5157 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5158 bnx2x_init_ind_table(bp);
5159 bnx2x_stats_init(bp);
5160
5161 /* At this point, we are ready for interrupts */
5162 atomic_set(&bp->intr_sem, 0);
5163
5164 /* flush all before enabling interrupts */
5165 mb();
5166 mmiowb();
5167
615f8fd9 5168 bnx2x_int_enable(bp);
5169}
5170
5171/* end of nic init */
5172
5173/*
5174 * gzip service functions
5175 */
5176
5177static int bnx2x_gunzip_init(struct bnx2x *bp)
5178{
5179 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5180 &bp->gunzip_mapping);
5181 if (bp->gunzip_buf == NULL)
5182 goto gunzip_nomem1;
5183
5184 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5185 if (bp->strm == NULL)
5186 goto gunzip_nomem2;
5187
5188 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5189 GFP_KERNEL);
5190 if (bp->strm->workspace == NULL)
5191 goto gunzip_nomem3;
5192
5193 return 0;
5194
5195gunzip_nomem3:
5196 kfree(bp->strm);
5197 bp->strm = NULL;
5198
5199gunzip_nomem2:
5200 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5201 bp->gunzip_mapping);
5202 bp->gunzip_buf = NULL;
5203
5204gunzip_nomem1:
5205 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5206 " un-compression\n", bp->dev->name);
5207 return -ENOMEM;
5208}
5209
5210static void bnx2x_gunzip_end(struct bnx2x *bp)
5211{
5212 kfree(bp->strm->workspace);
5213
5214 kfree(bp->strm);
5215 bp->strm = NULL;
5216
5217 if (bp->gunzip_buf) {
5218 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219 bp->gunzip_mapping);
5220 bp->gunzip_buf = NULL;
5221 }
5222}
5223
5224static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5225{
5226 int n, rc;
5227
5228 /* check gzip header */
5229 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5230 return -EINVAL;
5231
5232 n = 10;
5233
34f80b04 5234#define FNAME 0x8
5235
5236 if (zbuf[3] & FNAME)
5237 while ((zbuf[n++] != 0) && (n < len));
5238
5239 bp->strm->next_in = zbuf + n;
5240 bp->strm->avail_in = len - n;
5241 bp->strm->next_out = bp->gunzip_buf;
5242 bp->strm->avail_out = FW_BUF_SIZE;
5243
5244 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5245 if (rc != Z_OK)
5246 return rc;
5247
5248 rc = zlib_inflate(bp->strm, Z_FINISH);
5249 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5250 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5251 bp->dev->name, bp->strm->msg);
5252
5253 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5254 if (bp->gunzip_outlen & 0x3)
5255 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5256 " gunzip_outlen (%d) not aligned\n",
5257 bp->dev->name, bp->gunzip_outlen);
5258 bp->gunzip_outlen >>= 2;
5259
5260 zlib_inflateEnd(bp->strm);
5261
5262 if (rc == Z_STREAM_END)
5263 return 0;
5264
5265 return rc;
5266}
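/* Header handling in bnx2x_gunzip() follows RFC 1952: bytes 0-1 are
 * the gzip magic (0x1f 0x8b), byte 2 the compression method and
 * byte 3 the flags; the fixed header is 10 bytes and FNAME marks a
 * NUL-terminated file name to skip. zlib_inflateInit2() is given
 * -MAX_WBITS to request raw deflate, since the gzip wrapper has
 * already been consumed by hand. */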
5267
5268/* nic load/unload */
5269
5270/*
34f80b04 5271 * General service functions
5272 */
5273
5274/* send a NIG loopback debug packet */
5275static void bnx2x_lb_pckt(struct bnx2x *bp)
5276{
a2fbb9ea 5277 u32 wb_write[3];
5278
5279 /* Ethernet source and destination addresses */
5280 wb_write[0] = 0x55555555;
5281 wb_write[1] = 0x55555555;
34f80b04 5282 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5283 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5284
5285 /* NON-IP protocol */
5286 wb_write[0] = 0x09000000;
5287 wb_write[1] = 0x55555555;
34f80b04 5288 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5289 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5290}
5291
5292/* Some of the internal memories
5293 * are not directly readable from the driver;
5294 * to test them we send debug packets.
5295 */
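/* The test gates off the parser's neighbour blocks, injects NIG
 * loopback packets and checks that the NIG and PRS packet counters
 * advance in a known pattern, releasing a CFC search credit along
 * the way; any mismatch fails the self test. */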
5296static int bnx2x_int_mem_test(struct bnx2x *bp)
5297{
5298 int factor;
5299 int count, i;
5300 u32 val = 0;
5301
ad8d3948 5302 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5303 factor = 120;
5304 else if (CHIP_REV_IS_EMUL(bp))
5305 factor = 200;
5306 else
a2fbb9ea 5307 factor = 1;
5308
5309 DP(NETIF_MSG_HW, "start part1\n");
5310
5311 /* Disable inputs of parser neighbor blocks */
5312 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5313 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5314 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5315 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5316
5317 /* Write 0 to parser credits for CFC search request */
5318 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5319
5320 /* send Ethernet packet */
5321 bnx2x_lb_pckt(bp);
5322
5323 /* TODO: do I reset the NIG statistics? */
5324 /* Wait until NIG register shows 1 packet of size 0x10 */
5325 count = 1000 * factor;
5326 while (count) {
34f80b04 5327
5328 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5329 val = *bnx2x_sp(bp, wb_data[0]);
5330 if (val == 0x10)
5331 break;
5332
5333 msleep(10);
5334 count--;
5335 }
5336 if (val != 0x10) {
5337 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5338 return -1;
5339 }
5340
5341 /* Wait until PRS register shows 1 packet */
5342 count = 1000 * factor;
5343 while (count) {
5344 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5345 if (val == 1)
5346 break;
5347
5348 msleep(10);
5349 count--;
5350 }
5351 if (val != 0x1) {
5352 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5353 return -2;
5354 }
5355
5356 /* Reset and init BRB, PRS */
34f80b04 5357 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5358 msleep(50);
34f80b04 5359 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5360 msleep(50);
5361 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5362 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5363
5364 DP(NETIF_MSG_HW, "part2\n");
5365
5366 /* Disable inputs of parser neighbor blocks */
5367 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5368 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5369 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5370 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5371
5372 /* Write 0 to parser credits for CFC search request */
5373 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5374
5375 /* send 10 Ethernet packets */
5376 for (i = 0; i < 10; i++)
5377 bnx2x_lb_pckt(bp);
5378
5379 /* Wait until NIG register shows 10 + 1
5380 packets of size 11*0x10 = 0xb0 */
5381 count = 1000 * factor;
5382 while (count) {
34f80b04 5383
5384 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5385 val = *bnx2x_sp(bp, wb_data[0]);
5386 if (val == 0xb0)
5387 break;
5388
5389 msleep(10);
5390 count--;
5391 }
5392 if (val != 0xb0) {
5393 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5394 return -3;
5395 }
5396
5397 /* Wait until PRS register shows 2 packets */
5398 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5399 if (val != 2)
5400 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5401
5402 /* Write 1 to parser credits for CFC search request */
5403 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5404
5405 /* Wait until PRS register shows 3 packets */
5406 msleep(10 * factor);
5407 /* then check that the PRS register indeed shows 3 packets */
5408 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5409 if (val != 3)
5410 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5411
5412 /* clear NIG EOP FIFO */
5413 for (i = 0; i < 11; i++)
5414 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5415 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5416 if (val != 1) {
5417 BNX2X_ERR("clear of NIG failed\n");
5418 return -4;
5419 }
5420
5421 /* Reset and init BRB, PRS, NIG */
5422 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5423 msleep(50);
5424 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5425 msleep(50);
5426 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5427 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5428#ifndef BCM_ISCSI
5429 /* set NIC mode */
5430 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5431#endif
5432
5433 /* Enable inputs of parser neighbor blocks */
5434 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5435 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5436 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5437 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5438
5439 DP(NETIF_MSG_HW, "done\n");
5440
5441 return 0; /* OK */
5442}
5443
5444static void enable_blocks_attention(struct bnx2x *bp)
5445{
5446 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5447 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5448 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5449 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5450 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5451 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5452 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5453 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5454 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5455/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5456/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5457 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5458 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5459 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5460/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5461/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5462 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5463 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5464 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5465 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5466/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5467/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5468 if (CHIP_REV_IS_FPGA(bp))
5469 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5470 else
5471 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5472 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5473 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5474 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5475/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5476/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5477 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5478 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5479/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5480 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5481}
5482
34f80b04 5483
5484static void bnx2x_reset_common(struct bnx2x *bp)
5485{
5486 /* reset_common */
5487 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5488 0xd3ffff7f);
5489 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5490}
5491
34f80b04 5492static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5493{
a2fbb9ea 5494 u32 val, i;
a2fbb9ea 5495
34f80b04 5496 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5497
81f75bbf 5498 bnx2x_reset_common(bp);
5499 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5500 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5501
5502 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5503 if (CHIP_IS_E1H(bp))
5504 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5505
5506 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5507 msleep(30);
5508 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5509
5510 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5511 if (CHIP_IS_E1(bp)) {
5512 /* enable HW interrupt from PXP on USDM overflow
5513 bit 16 on INT_MASK_0 */
5514 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5515 }
a2fbb9ea 5516
5517 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5518 bnx2x_init_pxp(bp);
5519
5520#ifdef __BIG_ENDIAN
5521 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5522 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5523 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5524 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5525 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5526 /* make sure this value is 0 */
5527 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5528
5529/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5530 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5531 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5532 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5533 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5534#endif
5535
34f80b04 5536 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5537#ifdef BCM_ISCSI
5538 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5539 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5540 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5541#endif
5542
5543 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5544 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5545
5546 /* let the HW do its magic ... */
5547 msleep(100);
5548 /* finish PXP init */
5549 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5550 if (val != 1) {
5551 BNX2X_ERR("PXP2 CFG failed\n");
5552 return -EBUSY;
5553 }
5554 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5555 if (val != 1) {
5556 BNX2X_ERR("PXP2 RD_INIT failed\n");
5557 return -EBUSY;
5558 }
a2fbb9ea 5559
5560 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5561 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5562
34f80b04 5563 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5564
5565 /* clean the DMAE memory */
5566 bp->dmae_ready = 1;
5567 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5568
5569 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5570 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5571 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5572 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5573
5574 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5575 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5576 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5577 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5578
5579 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5580 /* soft reset pulse */
5581 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5582 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5583
5584#ifdef BCM_ISCSI
34f80b04 5585 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5586#endif
a2fbb9ea 5587
5588 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5589 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5590 if (!CHIP_REV_IS_SLOW(bp)) {
5591 /* enable hw interrupt from doorbell Q */
5592 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5593 }
a2fbb9ea 5594
34f80b04 5595 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5596 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5597 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5598 /* set NIC mode */
5599 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5600 if (CHIP_IS_E1H(bp))
5601 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5602
5603 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5604 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5605 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5606 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5607
5608 if (CHIP_IS_E1H(bp)) {
5609 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5610 STORM_INTMEM_SIZE_E1H/2);
5611 bnx2x_init_fill(bp,
5612 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5613 0, STORM_INTMEM_SIZE_E1H/2);
5614 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5615 STORM_INTMEM_SIZE_E1H/2);
5616 bnx2x_init_fill(bp,
5617 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5618 0, STORM_INTMEM_SIZE_E1H/2);
5619 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5620 STORM_INTMEM_SIZE_E1H/2);
5621 bnx2x_init_fill(bp,
5622 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5623 0, STORM_INTMEM_SIZE_E1H/2);
5624 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5625 STORM_INTMEM_SIZE_E1H/2);
5626 bnx2x_init_fill(bp,
5627 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5628 0, STORM_INTMEM_SIZE_E1H/2);
5629 } else { /* E1 */
5630 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5631 STORM_INTMEM_SIZE_E1);
5632 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5633 STORM_INTMEM_SIZE_E1);
5634 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5635 STORM_INTMEM_SIZE_E1);
5636 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5637 STORM_INTMEM_SIZE_E1);
34f80b04 5638 }
a2fbb9ea 5639
5640 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5641 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5642 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5643 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5644
5645 /* sync semi rtc */
5646 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5647 0x80000000);
5648 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5649 0x80000000);
a2fbb9ea 5650
5651 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5652 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5653 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5654
5655 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5656 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5657 REG_WR(bp, i, 0xc0cac01a);
5658 /* TODO: replace with something meaningful */
5659 }
8d9c5f34 5660 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5661 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5662
5663 if (sizeof(union cdu_context) != 1024)
5664 /* we currently assume that a context is 1024 bytes */
5665 printk(KERN_ALERT PFX "please adjust the size of"
5666 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5667
5668 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5669 val = (4 << 24) + (0 << 12) + 1024;
5670 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5671 if (CHIP_IS_E1(bp)) {
5672 /* !!! fix pxp client credit until excel update */
5673 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5674 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5675 }
a2fbb9ea 5676
5677 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5678 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5679 /* enable context validation interrupt from CFC */
5680 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5681
5682 /* set the thresholds to prevent CFC/CDU race */
5683 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5684
5685 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5686 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5687
5688 /* PXPCS COMMON comes here */
5689 /* Reset PCIE errors for debug */
5690 REG_WR(bp, 0x2814, 0xffffffff);
5691 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5692
5693 /* EMAC0 COMMON comes here */
5694 /* EMAC1 COMMON comes here */
5695 /* DBU COMMON comes here */
5696 /* DBG COMMON comes here */
5697
5698 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5699 if (CHIP_IS_E1H(bp)) {
5700 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5701 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5702 }
5703
5704 if (CHIP_REV_IS_SLOW(bp))
5705 msleep(200);
5706
5707 /* finish CFC init */
5708 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5709 if (val != 1) {
5710 BNX2X_ERR("CFC LL_INIT failed\n");
5711 return -EBUSY;
5712 }
5713 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5714 if (val != 1) {
5715 BNX2X_ERR("CFC AC_INIT failed\n");
5716 return -EBUSY;
5717 }
5718 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5719 if (val != 1) {
5720 BNX2X_ERR("CFC CAM_INIT failed\n");
5721 return -EBUSY;
5722 }
5723 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5724
5725 /* read NIG statistic
5726 to see if this is our first load since power-up */
5727 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5728 val = *bnx2x_sp(bp, wb_data[0]);
5729
5730 /* do internal memory self test */
5731 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5732 BNX2X_ERR("internal mem self test failed\n");
5733 return -EBUSY;
5734 }
5735
35b19ba5 5736 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5740 bp->port.need_hw_lock = 1;
5741 break;
5742
35b19ba5 5743 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5744 /* Fan failure is indicated by SPIO 5 */
5745 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5746 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5747
5748 /* set to active low mode */
5749 val = REG_RD(bp, MISC_REG_SPIO_INT);
5750 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5751 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5752 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5753
5754 /* enable interrupt to signal the IGU */
5755 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5756 val |= (1 << MISC_REGISTERS_SPIO_5);
5757 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5758 break;
f1410647 5759
5760 default:
5761 break;
5762 }
f1410647 5763
5764 /* clear PXP2 attentions */
5765 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5766
34f80b04 5767 enable_blocks_attention(bp);
a2fbb9ea 5768
5769 if (!BP_NOMCP(bp)) {
5770 bnx2x_acquire_phy_lock(bp);
5771 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5772 bnx2x_release_phy_lock(bp);
5773 } else
5774 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5775
5776 return 0;
5777}
a2fbb9ea 5778
5779static int bnx2x_init_port(struct bnx2x *bp)
5780{
5781 int port = BP_PORT(bp);
1c06328c 5782 u32 low, high;
34f80b04 5783 u32 val;
a2fbb9ea 5784
5785 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5786
5787 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5788
5789 /* Port PXP comes here */
5790 /* Port PXP2 comes here */
5791#ifdef BCM_ISCSI
5792 /* Port0 1
5793 * Port1 385 */
5794 i++;
5795 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5796 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5797 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5798 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5799
5800 /* Port0 2
5801 * Port1 386 */
5802 i++;
5803 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5804 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5805 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5806 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5807
5808 /* Port0 3
5809 * Port1 387 */
5810 i++;
5811 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5812 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5813 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5814 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5815#endif
34f80b04 5816 /* Port CMs come here */
5817 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5818 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5819
5820 /* Port QM comes here */
a2fbb9ea
ET
5821#ifdef BCM_ISCSI
5822 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5823 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5824
5825 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5826 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5827#endif
5828 /* Port DQ comes here */
5829
5830 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5831 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5832 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5833 /* no pause for emulation and FPGA */
5834 low = 0;
5835 high = 513;
5836 } else {
5837 if (IS_E1HMF(bp))
5838 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5839 else if (bp->dev->mtu > 4096) {
5840 if (bp->flags & ONE_PORT_FLAG)
5841 low = 160;
5842 else {
5843 val = bp->dev->mtu;
5844 /* (24*1024 + val*4)/256 */
5845 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5846 }
5847 } else
5848 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5849 high = low + 56; /* 14*1024/256 */
5850 }
5851 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5852 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
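 /* Worked numbers for the two-port, non-MF, mtu > 4096 branch
 above: with mtu 9000, low = 96 + 9000/64 rounded up = 237
 (in 256-byte units, i.e. (24*1024 + 4*mtu)/256) and
 high = 237 + 56, i.e. 14KB above low. */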
5853
5854
ad8d3948 5855 /* Port PRS comes here */
5856 /* Port TSDM comes here */
5857 /* Port CSDM comes here */
5858 /* Port USDM comes here */
5859 /* Port XSDM comes here */
5860 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5861 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5862 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5863 port ? USEM_PORT1_END : USEM_PORT0_END);
5864 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5865 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5866 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5867 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5868 /* Port UPB comes here */
5869 /* Port XPB comes here */
5870
5871 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5872 port ? PBF_PORT1_END : PBF_PORT0_END);
5873
5874 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5875 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5876
5877 /* update threshold */
34f80b04 5878 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5879 /* update init credit */
34f80b04 5880 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5881
5882 /* probe changes */
34f80b04 5883 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5884 msleep(5);
34f80b04 5885 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5886
5887#ifdef BCM_ISCSI
5888 /* tell the searcher where the T2 table is */
5889 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5890
5891 wb_write[0] = U64_LO(bp->t2_mapping);
5892 wb_write[1] = U64_HI(bp->t2_mapping);
5893 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5894 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5895 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5896 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5897
5898 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5899 /* Port SRCH comes here */
5900#endif
5901 /* Port CDU comes here */
5902 /* Port CFC comes here */
5903
5904 if (CHIP_IS_E1(bp)) {
5905 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5906 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5907 }
5908 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5909 port ? HC_PORT1_END : HC_PORT0_END);
5910
5911 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5912 MISC_AEU_PORT0_START,
5913 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5914 /* init aeu_mask_attn_func_0/1:
5915 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5916 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5917 * bits 4-7 are used for "per vn group attention" */
5918 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5919 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5920
5921 /* Port PXPCS comes here */
5922 /* Port EMAC0 comes here */
5923 /* Port EMAC1 comes here */
5924 /* Port DBU comes here */
5925 /* Port DBG comes here */
5926 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5927 port ? NIG_PORT1_END : NIG_PORT0_END);
5928
5929 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5930
5931 if (CHIP_IS_E1H(bp)) {
5932 /* 0x2 disable e1hov, 0x1 enable */
5933 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5934 (IS_E1HMF(bp) ? 0x1 : 0x2));
5935
5936 /* support pause requests from USDM, TSDM and BRB */
5937 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5938
5939 {
5940 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5941 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5942 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5943 }
5944 }
5945
5946 /* Port MCP comes here */
5947 /* Port DMAE comes here */
5948
35b19ba5 5949 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5950 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5951 {
5952 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5953
5954 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5955 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5956
5957 /* The GPIO should be swapped if the swap register is
5958 set and active */
5959 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5960 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5961
5962 /* Select function upon port-swap configuration */
5963 if (port == 0) {
5964 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5965 aeu_gpio_mask = (swap_val && swap_override) ?
5966 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5967 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5968 } else {
5969 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5970 aeu_gpio_mask = (swap_val && swap_override) ?
5971 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5972 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5973 }
5974 val = REG_RD(bp, offset);
5975 /* add GPIO3 to group */
5976 val |= aeu_gpio_mask;
5977 REG_WR(bp, offset, val);
5978 }
5979 break;
5980
35b19ba5 5981 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5982 /* add SPIO 5 to group 0 */
5983 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5984 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5985 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5986 break;
5987
5988 default:
5989 break;
5990 }
5991
c18487ee 5992 bnx2x__link_reset(bp);
a2fbb9ea 5993
5994 return 0;
5995}
5996
5997#define ILT_PER_FUNC (768/2)
5998#define FUNC_ILT_BASE(func) ((func) * ILT_PER_FUNC)
5999/* the phys address is shifted right 12 bits and has a
6000 valid bit (1) added as the 53rd bit
6001 then since this is a wide register(TM)
6002 we split it into two 32 bit writes
6003 */
6004#define ONCHIP_ADDR1(x) ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6005#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)(x) >> 44)))
6006#define PXP_ONE_ILT(x) (((x) << 10) | (x))
6007#define PXP_ILT_RANGE(f, l) (((l) << 10) | (f))
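/* Worked example: for phys addr 0x123456000, ONCHIP_ADDR1() yields
 * 0x00123456 (the address shifted right 12 bits) and ONCHIP_ADDR2()
 * yields 0x00100000 (the valid bit alone, bits 44-63 being zero);
 * bnx2x_wb_wr() below then writes the two halves as one wide ILT
 * entry. */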
6008
6009#define CNIC_ILT_LINES 0
6010
6011static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6012{
6013 int reg;
6014
6015 if (CHIP_IS_E1H(bp))
6016 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6017 else /* E1 */
6018 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6019
6020 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6021}
6022
6023static int bnx2x_init_func(struct bnx2x *bp)
6024{
6025 int port = BP_PORT(bp);
6026 int func = BP_FUNC(bp);
8badd27a 6027 u32 addr, val;
6028 int i;
6029
6030 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6031
6032 /* set MSI reconfigure capability */
6033 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6034 val = REG_RD(bp, addr);
6035 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6036 REG_WR(bp, addr, val);
6037
6038 i = FUNC_ILT_BASE(func);
6039
6040 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6041 if (CHIP_IS_E1H(bp)) {
6042 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6043 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6044 } else /* E1 */
6045 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6046 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6047
6048
6049 if (CHIP_IS_E1H(bp)) {
6050 for (i = 0; i < 9; i++)
6051 bnx2x_init_block(bp,
6052 cm_start[func][i], cm_end[func][i]);
6053
6054 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6055 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6056 }
6057
6058 /* HC init per function */
6059 if (CHIP_IS_E1H(bp)) {
6060 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6061
6062 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6063 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6064 }
6065 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6066
c14423fe 6067 /* Reset PCIE errors for debug */
6068 REG_WR(bp, 0x2114, 0xffffffff);
6069 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6070
6071 return 0;
6072}
6073
6074static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6075{
6076 int i, rc = 0;
a2fbb9ea 6077
6078 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6079 BP_FUNC(bp), load_code);
a2fbb9ea 6080
6081 bp->dmae_ready = 0;
6082 mutex_init(&bp->dmae_mutex);
6083 bnx2x_gunzip_init(bp);
a2fbb9ea 6084
6085 switch (load_code) {
6086 case FW_MSG_CODE_DRV_LOAD_COMMON:
6087 rc = bnx2x_init_common(bp);
6088 if (rc)
6089 goto init_hw_err;
6090 /* no break */
6091
6092 case FW_MSG_CODE_DRV_LOAD_PORT:
6093 bp->dmae_ready = 1;
6094 rc = bnx2x_init_port(bp);
6095 if (rc)
6096 goto init_hw_err;
6097 /* no break */
6098
6099 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6100 bp->dmae_ready = 1;
6101 rc = bnx2x_init_func(bp);
6102 if (rc)
6103 goto init_hw_err;
6104 break;
6105
6106 default:
6107 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6108 break;
6109 }
6110
6111 if (!BP_NOMCP(bp)) {
6112 int func = BP_FUNC(bp);
6113
6114 bp->fw_drv_pulse_wr_seq =
34f80b04 6115 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6116 DRV_PULSE_SEQ_MASK);
6117 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6118 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6119 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6120 } else
6121 bp->func_stx = 0;
a2fbb9ea 6122
6123 /* this needs to be done before gunzip end */
6124 bnx2x_zero_def_sb(bp);
6125 for_each_queue(bp, i)
6126 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6127
6128init_hw_err:
6129 bnx2x_gunzip_end(bp);
6130
6131 return rc;
6132}
6133
c14423fe 6134/* send the MCP a request, block until there is a reply */
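/* The handshake: write (command | seq) into the driver mailbox, poll
 * the FW mailbox until the FW echoes the same sequence number, then
 * take the remaining FW_MSG_CODE_MASK bits as the reply code */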
6135static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6136{
34f80b04 6137 int func = BP_FUNC(bp);
6138 u32 seq = ++bp->fw_seq;
6139 u32 rc = 0;
6140 u32 cnt = 1;
6141 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6142
34f80b04 6143 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6144 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6145
6146 do {
6147 /* let the FW do its magic ... */
6148 msleep(delay);
a2fbb9ea 6149
19680c48 6150 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6151
6152 /* Give the FW up to 2 seconds (200*10ms) */
6153 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6154
6155 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6156 cnt*delay, rc, seq);
6157
6158 /* is this a reply to our command? */
6159 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6160 rc &= FW_MSG_CODE_MASK;
f1410647 6161
6162 } else {
6163 /* FW BUG! */
6164 BNX2X_ERR("FW failed to respond!\n");
6165 bnx2x_fw_dump(bp);
6166 rc = 0;
6167 }
f1410647 6168
6169 return rc;
6170}
6171
6172static void bnx2x_free_mem(struct bnx2x *bp)
6173{
6174
6175#define BNX2X_PCI_FREE(x, y, size) \
6176 do { \
6177 if (x) { \
6178 pci_free_consistent(bp->pdev, size, x, y); \
6179 x = NULL; \
6180 y = 0; \
6181 } \
6182 } while (0)
6183
6184#define BNX2X_FREE(x) \
6185 do { \
6186 if (x) { \
6187 vfree(x); \
6188 x = NULL; \
6189 } \
6190 } while (0)
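/* The do { } while (0) wrappers make these multi-statement macros
 * behave as single statements, e.g. safe in an unbraced if/else */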
6191
6192 int i;
6193
6194 /* fastpath */
555f6c78 6195 /* Common */
6196 for_each_queue(bp, i) {
6197
555f6c78 6198 /* status blocks */
6199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6200 bnx2x_fp(bp, i, status_blk_mapping),
6201 sizeof(struct host_status_block) +
6202 sizeof(struct eth_tx_db_data));
6203 }
6204 /* Rx */
6205 for_each_rx_queue(bp, i) {
a2fbb9ea 6206
555f6c78 6207 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6208 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6209 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6210 bnx2x_fp(bp, i, rx_desc_mapping),
6211 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6212
6213 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6214 bnx2x_fp(bp, i, rx_comp_mapping),
6215 sizeof(struct eth_fast_path_rx_cqe) *
6216 NUM_RCQ_BD);
a2fbb9ea 6217
7a9b2557 6218 /* SGE ring */
32626230 6219 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6220 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6221 bnx2x_fp(bp, i, rx_sge_mapping),
6222 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6223 }
6224 /* Tx */
6225 for_each_tx_queue(bp, i) {
6226
6227 /* fastpath tx rings: tx_buf tx_desc */
6228 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6229 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6230 bnx2x_fp(bp, i, tx_desc_mapping),
6231 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6232 }
6233 /* end of fastpath */
6234
6235 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6236 sizeof(struct host_def_status_block));
6237
6238 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6239 sizeof(struct bnx2x_slowpath));
6240
6241#ifdef BCM_ISCSI
6242 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6243 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6244 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6245 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6246#endif
7a9b2557 6247 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6248
6249#undef BNX2X_PCI_FREE
6250#undef BNX2X_FREE
6251}
6252
6253static int bnx2x_alloc_mem(struct bnx2x *bp)
6254{
6255
6256#define BNX2X_PCI_ALLOC(x, y, size) \
6257 do { \
6258 x = pci_alloc_consistent(bp->pdev, size, y); \
6259 if (x == NULL) \
6260 goto alloc_mem_err; \
6261 memset(x, 0, size); \
6262 } while (0)
6263
6264#define BNX2X_ALLOC(x, size) \
6265 do { \
6266 x = vmalloc(size); \
6267 if (x == NULL) \
6268 goto alloc_mem_err; \
6269 memset(x, 0, size); \
6270 } while (0)
6271
6272 int i;
6273
6274 /* fastpath */
555f6c78 6275 /* Common */
6276 for_each_queue(bp, i) {
6277 bnx2x_fp(bp, i, bp) = bp;
6278
555f6c78 6279 /* status blocks */
6280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6281 &bnx2x_fp(bp, i, status_blk_mapping),
6282 sizeof(struct host_status_block) +
6283 sizeof(struct eth_tx_db_data));
6284 }
6285 /* Rx */
6286 for_each_rx_queue(bp, i) {
a2fbb9ea 6287
555f6c78 6288 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6289 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6290 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6291 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6292 &bnx2x_fp(bp, i, rx_desc_mapping),
6293 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6294
6295 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6296 &bnx2x_fp(bp, i, rx_comp_mapping),
6297 sizeof(struct eth_fast_path_rx_cqe) *
6298 NUM_RCQ_BD);
6299
6300 /* SGE ring */
6301 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6302 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6303 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6304 &bnx2x_fp(bp, i, rx_sge_mapping),
6305 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6306 }
6307 /* Tx */
6308 for_each_tx_queue(bp, i) {
6309
6310 bnx2x_fp(bp, i, hw_tx_prods) =
6311 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6312
6313 bnx2x_fp(bp, i, tx_prods_mapping) =
6314 bnx2x_fp(bp, i, status_blk_mapping) +
6315 sizeof(struct host_status_block);
6316
6317 /* fastpath tx rings: tx_buf tx_desc */
6318 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6319 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6320 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6321 &bnx2x_fp(bp, i, tx_desc_mapping),
6322 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6323 }
6324 /* end of fastpath */
6325
6326 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6327 sizeof(struct host_def_status_block));
6328
6329 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6330 sizeof(struct bnx2x_slowpath));
6331
6332#ifdef BCM_ISCSI
6333 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6334
6335 /* Initialize T1 */
6336 for (i = 0; i < 64*1024; i += 64) {
6337 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6338 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6339 }
6340
6341 /* allocate searcher T2 table
6342 we allocate 1/4 of alloc num for T2
6343 (which is not entered into the ILT) */
6344 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6345
6346 /* Initialize T2 */
6347 for (i = 0; i < 16*1024; i += 64)
6348 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6349
c14423fe 6350 /* now fixup the last line in the block to point to the next block */
6351 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6352
6353 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6354 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6355
6356 /* QM queues (128*MAX_CONN) */
6357 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6358#endif
6359
6360 /* Slow path ring */
6361 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6362
6363 return 0;
6364
6365alloc_mem_err:
6366 bnx2x_free_mem(bp);
6367 return -ENOMEM;
6368
6369#undef BNX2X_PCI_ALLOC
6370#undef BNX2X_ALLOC
6371}
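/* Error handling follows the usual kernel unwind pattern: any failed
 * allocation jumps to alloc_mem_err and bnx2x_free_mem() is run; the
 * free side guards every entry with an if (x) check, so one cleanup
 * path copes with a partially completed allocation. */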
6372
6373static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6374{
6375 int i;
6376
555f6c78 6377 for_each_tx_queue(bp, i) {
6378 struct bnx2x_fastpath *fp = &bp->fp[i];
6379
6380 u16 bd_cons = fp->tx_bd_cons;
6381 u16 sw_prod = fp->tx_pkt_prod;
6382 u16 sw_cons = fp->tx_pkt_cons;
6383
6384 while (sw_cons != sw_prod) {
6385 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6386 sw_cons++;
6387 }
6388 }
6389}
6390
6391static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6392{
6393 int i, j;
6394
555f6c78 6395 for_each_rx_queue(bp, j) {
6396 struct bnx2x_fastpath *fp = &bp->fp[j];
6397
6398 for (i = 0; i < NUM_RX_BD; i++) {
6399 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6400 struct sk_buff *skb = rx_buf->skb;
6401
6402 if (skb == NULL)
6403 continue;
6404
6405 pci_unmap_single(bp->pdev,
6406 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6407 bp->rx_buf_size,
6408 PCI_DMA_FROMDEVICE);
6409
6410 rx_buf->skb = NULL;
6411 dev_kfree_skb(skb);
6412 }
7a9b2557 6413 if (!fp->disable_tpa)
6414 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6415 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6416 ETH_MAX_AGGREGATION_QUEUES_E1H);
6417 }
6418}
6419
6420static void bnx2x_free_skbs(struct bnx2x *bp)
6421{
6422 bnx2x_free_tx_skbs(bp);
6423 bnx2x_free_rx_skbs(bp);
6424}
6425
6426static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6427{
34f80b04 6428 int i, offset = 1;
6429
6430 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6431 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6432 bp->msix_table[0].vector);
6433
6434 for_each_queue(bp, i) {
c14423fe 6435 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6436 "state %x\n", i, bp->msix_table[i + offset].vector,
6437 bnx2x_fp(bp, i, state));
6438
34f80b04 6439 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6440 }
6441}
6442
6443static void bnx2x_free_irq(struct bnx2x *bp)
6444{
a2fbb9ea 6445 if (bp->flags & USING_MSIX_FLAG) {
6446 bnx2x_free_msix_irqs(bp);
6447 pci_disable_msix(bp->pdev);
6448 bp->flags &= ~USING_MSIX_FLAG;
6449
6450 } else if (bp->flags & USING_MSI_FLAG) {
6451 free_irq(bp->pdev->irq, bp->dev);
6452 pci_disable_msi(bp->pdev);
6453 bp->flags &= ~USING_MSI_FLAG;
6454
6455 } else
6456 free_irq(bp->pdev->irq, bp->dev);
6457}
6458
6459static int bnx2x_enable_msix(struct bnx2x *bp)
6460{
6461 int i, rc, offset = 1;
6462 int igu_vec = 0;
a2fbb9ea 6463
6464 bp->msix_table[0].entry = igu_vec;
6465 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6466
34f80b04 6467 for_each_queue(bp, i) {
8badd27a 6468 igu_vec = BP_L_ID(bp) + offset + i;
6469 bp->msix_table[i + offset].entry = igu_vec;
6470 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6471 "(fastpath #%u)\n", i + offset, igu_vec, i);
6472 }
6473
34f80b04 6474 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6475 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6476 if (rc) {
6477 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6478 return rc;
34f80b04 6479 }
8badd27a 6480
6481 bp->flags |= USING_MSIX_FLAG;
6482
6483 return 0;
6484}
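/* Resulting vector layout: msix_table[0] is the slowpath interrupt
 * and entries 1..BNX2X_NUM_QUEUES(bp) serve the fastpath queues,
 * queue i getting IGU vector BP_L_ID(bp) + 1 + i as assigned above. */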
6485
6486static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6487{
34f80b04 6488 int i, rc, offset = 1;
a2fbb9ea 6489
6490 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6491 bp->dev->name, bp->dev);
6492 if (rc) {
6493 BNX2X_ERR("request sp irq failed\n");
6494 return -EBUSY;
6495 }
6496
6497 for_each_queue(bp, i) {
6498 struct bnx2x_fastpath *fp = &bp->fp[i];
6499
6500 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6501 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6502 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6503 if (rc) {
555f6c78 6504 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6505 bnx2x_free_msix_irqs(bp);
6506 return -EBUSY;
6507 }
6508
555f6c78 6509 fp->state = BNX2X_FP_STATE_IRQ;
6510 }
6511
6512 i = BNX2X_NUM_QUEUES(bp);
6513 if (is_multi(bp))
6514 printk(KERN_INFO PFX
6515 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6516 bp->dev->name, bp->msix_table[0].vector,
6517 bp->msix_table[offset].vector,
6518 bp->msix_table[offset + i - 1].vector);
6519 else
6520 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6521 bp->dev->name, bp->msix_table[0].vector,
6522 bp->msix_table[offset + i - 1].vector);
6523
a2fbb9ea 6524 return 0;
6525}
6526
6527static int bnx2x_enable_msi(struct bnx2x *bp)
6528{
6529 int rc;
6530
6531 rc = pci_enable_msi(bp->pdev);
6532 if (rc) {
6533 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6534 return -1;
6535 }
6536 bp->flags |= USING_MSI_FLAG;
6537
6538 return 0;
6539}
6540
6541static int bnx2x_req_irq(struct bnx2x *bp)
6542{
8badd27a 6543 unsigned long flags;
34f80b04 6544 int rc;
a2fbb9ea 6545
6546 if (bp->flags & USING_MSI_FLAG)
6547 flags = 0;
6548 else
6549 flags = IRQF_SHARED;
6550
6551 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6552 bp->dev->name, bp->dev);
6553 if (!rc)
6554 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6555
6556 return rc;
6557}
6558
6559static void bnx2x_napi_enable(struct bnx2x *bp)
6560{
6561 int i;
6562
555f6c78 6563 for_each_rx_queue(bp, i)
6564 napi_enable(&bnx2x_fp(bp, i, napi));
6565}
6566
6567static void bnx2x_napi_disable(struct bnx2x *bp)
6568{
6569 int i;
6570
555f6c78 6571 for_each_rx_queue(bp, i)
6572 napi_disable(&bnx2x_fp(bp, i, napi));
6573}
6574
6575static void bnx2x_netif_start(struct bnx2x *bp)
6576{
6577 if (atomic_dec_and_test(&bp->intr_sem)) {
6578 if (netif_running(bp->dev)) {
6579 bnx2x_napi_enable(bp);
6580 bnx2x_int_enable(bp);
6581 if (bp->state == BNX2X_STATE_OPEN)
6582 netif_tx_wake_all_queues(bp->dev);
6583 }
6584 }
6585}
6586
f8ef6e44 6587static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6588{
f8ef6e44 6589 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6590 bnx2x_napi_disable(bp);
65abd74d 6591 if (netif_running(bp->dev)) {
6592 netif_tx_disable(bp->dev);
6593 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6594 }
6595}
6596
6597/*
6598 * Init service functions
6599 */
6600
3101c2bc 6601static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6602{
6603 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6604 int port = BP_PORT(bp);
6605
6606 /* CAM allocation
6607 * unicasts 0-31:port0 32-63:port1
6608 * multicast 64-127:port0 128-191:port1
6609 */
8d9c5f34 6610 config->hdr.length = 2;
af246401 6611 config->hdr.offset = port ? 32 : 0;
0626b899 6612 config->hdr.client_id = bp->fp->cl_id;
6613 config->hdr.reserved1 = 0;
6614
6615 /* primary MAC */
6616 config->config_table[0].cam_entry.msb_mac_addr =
6617 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6618 config->config_table[0].cam_entry.middle_mac_addr =
6619 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6620 config->config_table[0].cam_entry.lsb_mac_addr =
6621 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6622 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6623 if (set)
6624 config->config_table[0].target_table_entry.flags = 0;
6625 else
6626 CAM_INVALIDATE(config->config_table[0]);
6627 config->config_table[0].target_table_entry.client_id = 0;
6628 config->config_table[0].target_table_entry.vlan_id = 0;
6629
6630 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6631 (set ? "setting" : "clearing"),
6632 config->config_table[0].cam_entry.msb_mac_addr,
6633 config->config_table[0].cam_entry.middle_mac_addr,
6634 config->config_table[0].cam_entry.lsb_mac_addr);
6635
6636 /* broadcast */
6637 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6638 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6639 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6640 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6641 if (set)
6642 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6643 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6644 else
6645 CAM_INVALIDATE(config->config_table[1]);
6646 config->config_table[1].target_table_entry.client_id = 0;
6647 config->config_table[1].target_table_entry.vlan_id = 0;
6648
6649 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6650 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6651 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6652}
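/* CAM entry layout as filled in above: the MAC is stored as three
 * 16-bit words (swab16() of each dev_addr byte pair); entry 0 holds
 * the primary unicast address, entry 1 the broadcast filter, and a
 * clear invalidates those same entries instead of rewriting them. */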
6653
3101c2bc 6654static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6655{
6656 struct mac_configuration_cmd_e1h *config =
6657 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6658
3101c2bc 6659 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6660 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6661 return;
6662 }
6663
6664 /* CAM allocation for E1H
6665 * unicasts: by func number
6666 * multicast: 20+FUNC*20, 20 each
6667 */
8d9c5f34 6668 config->hdr.length = 1;
34f80b04 6669 config->hdr.offset = BP_FUNC(bp);
0626b899 6670 config->hdr.client_id = bp->fp->cl_id;
6671 config->hdr.reserved1 = 0;
6672
6673 /* primary MAC */
6674 config->config_table[0].msb_mac_addr =
6675 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6676 config->config_table[0].middle_mac_addr =
6677 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6678 config->config_table[0].lsb_mac_addr =
6679 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6680 config->config_table[0].client_id = BP_L_ID(bp);
6681 config->config_table[0].vlan_id = 0;
6682 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6683 if (set)
6684 config->config_table[0].flags = BP_PORT(bp);
6685 else
6686 config->config_table[0].flags =
6687 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6688
3101c2bc
YG
6689 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6690 (set ? "setting" : "clearing"),
34f80b04
EG
6691 config->config_table[0].msb_mac_addr,
6692 config->config_table[0].middle_mac_addr,
6693 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6694
6695 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6696 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6697 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6698}
6699
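/* Ramrod completion protocol: bnx2x_sp_event() (slowpath interrupt
 * context) advances *state_p when the matching completion arrives, so
 * the waiter below simply re-reads it (after mb()) once per millisecond.
 * A minimal usage sketch, mirroring bnx2x_setup_multi() further down:
 *
 *	fp->state = BNX2X_FP_STATE_OPENING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
 *		      fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
 *			       &fp->state, 0);
 */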
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

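/* Queue-count selection, illustrated (BNX2X_MAX_QUEUES(bp) is the
 * chip-dependent ceiling): in ETH_RSS_MODE_REGULAR with MSI-X granted
 * and, say, 16 online CPUs, rx = tx = min(16, BNX2X_MAX_QUEUES(bp));
 * if int_mode forces INT#x/MSI, or MSI-X allocation fails, the driver
 * falls back to a single rx/tx queue pair.
 */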
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

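/* Load arbitration: with an MCP the firmware replies LOAD_COMMON /
 * LOAD_PORT / LOAD_FUNCTION; without one (emulation/FPGA) the driver
 * mimics it with load_count[] = {common, port0, port1}. Worked example
 * on a NO-MCP board: the first load on port 0 takes the counts from
 * {0,0,0} to {1,1,0} -> LOAD_COMMON (init common blocks); the next
 * load, on port 1, gives {2,1,1} -> LOAD_PORT; a later function on
 * port 1 gives {3,1,2} -> LOAD_FUNCTION.
 */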
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP  load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP  new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

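/* Tearing a queue down is the mirror image of setup and takes two
 * ramrods: HALT stops the client (its completion moves fp->state to
 * _HALTED), then CFC_DEL releases the connection context (state goes
 * to _CLOSED). Both waits run with poll=1, since by the time this is
 * called from bnx2x_nic_unload() the IRQs have already been released.
 */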
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

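/* Unload scope widens with the reset_code: FUNCTION resets only the
 * per-function resources (IGU edges, ILT), PORT additionally quiesces
 * the port-level blocks (NIG, BRB, AEU), and COMMON, granted only to
 * the last function to leave, resets the shared chip core as well.
 * Note the deliberate nesting below: each wider case also performs the
 * narrower resets.
 */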
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

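/* Unload runs roughly in reverse of load: kill the rx filter, stop
 * napi/tx, release IRQs, drain the tx fastpaths, invalidate the MAC
 * CAM entries, pick a WOL-dependent reset_code for the MCP, close the
 * client connections (multi queues first, leading last), reset the
 * chip at the granted scope and only then free memory. If WOL is
 * enabled, the current MAC is parked in EMAC match entries 1-4 first
 * (entry 0 stays reserved for the PMF).
 */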
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_mb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_mb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP  load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP  new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

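/* The PGL "pretend" register makes subsequent GRC accesses from this
 * PCI function appear to come from another function. It is used below
 * to disable interrupts as function 0 (the only "function" an E1-era
 * UNDI driver knows about), and every write is read back, since the
 * write must have reached the chip before the pretended accesses
 * start.
 */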
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

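/* UNDI (pre-boot) driver detection is heuristic: if MISC_REG_UNPREPARED
 * says a driver was loaded and the doorbell CID offset still holds the
 * value 0x7 that only the UNDI driver programs, we assume UNDI owns the
 * chip and walk it through a clean firmware unload (possibly for both
 * ports) before resetting, so that the MCP's load accounting stays
 * consistent.
 */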
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

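/* The 32-bit chip id is composed below as num<<16 | rev<<12 |
 * metal<<4 | bond_id. Illustrative example: a chip num of 0x164e with
 * rev, metal and bond all zero yields id 0x164e0000, which is what
 * the "chip ID is" banner prints.
 */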
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

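/* bp->port.supported was just pruned against speed_cap_mask, and every
 * branch below is gated on the surviving bits. Illustrative example:
 * if NVRAM left only the D0_10G capability bit set, a link_config
 * requesting PORT_FEATURE_LINK_SPEED_1G lands in the NVRAM-config-error
 * path, while _SPEED_AUTO simply advertises whatever survived.
 */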
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

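/* Port MAC recovery from shmem, worked example: mac_upper = 0x0010 and
 * mac_lower = 0x18aabbcc unpack most-significant byte first into
 * dev_addr 00:10:18:aa:bb:cc; the same scheme is reused for the
 * per-function MAC in the E1HMF path of bnx2x_get_hwinfo().
 */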
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

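/* In E1HMF mode the speed reported to ethtool is clamped to the VN's
 * bandwidth allotment: the max-bw field of mf_config is in units of
 * 100 Mbps, so (illustrative value) a field of 25 caps a 10G link at
 * a reported 2500.
 */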
8234static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8235{
8236 struct bnx2x *bp = netdev_priv(dev);
8237
34f80b04
EG
8238 cmd->supported = bp->port.supported;
8239 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8240
8241 if (netif_carrier_ok(dev)) {
c18487ee
YR
8242 cmd->speed = bp->link_vars.line_speed;
8243 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8244 } else {
c18487ee
YR
8245 cmd->speed = bp->link_params.req_line_speed;
8246 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8247 }
34f80b04
EG
8248 if (IS_E1HMF(bp)) {
8249 u16 vn_max_rate;
8250
8251 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8252 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8253 if (vn_max_rate < cmd->speed)
8254 cmd->speed = vn_max_rate;
8255 }
a2fbb9ea 8256
c18487ee
YR
8257 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8258 u32 ext_phy_type =
8259 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8260
8261 switch (ext_phy_type) {
8262 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8263 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8264 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8265 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8266 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8267 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8268 cmd->port = PORT_FIBRE;
8269 break;
8270
8271 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8272 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8273 cmd->port = PORT_TP;
8274 break;
8275
c18487ee
YR
8276 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8277 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8278 bp->link_params.ext_phy_config);
8279 break;
8280
f1410647
ET
8281 default:
8282 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8283 bp->link_params.ext_phy_config);
8284 break;
f1410647
ET
8285 }
8286 } else
a2fbb9ea 8287 cmd->port = PORT_TP;
a2fbb9ea 8288
34f80b04 8289 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8290 cmd->transceiver = XCVR_INTERNAL;
8291
c18487ee 8292 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8293 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8294 else
a2fbb9ea 8295 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8296
8297 cmd->maxtxpkt = 0;
8298 cmd->maxrxpkt = 0;
8299
8300 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8301 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8302 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8303 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8304 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8305 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8306 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8307
8308 return 0;
8309}
8310
8311static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8312{
8313 struct bnx2x *bp = netdev_priv(dev);
8314 u32 advertising;
8315
34f80b04
EG
8316 if (IS_E1HMF(bp))
8317 return 0;
8318
a2fbb9ea
ET
8319 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8320 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8321 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8322 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8323 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8324 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8325 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8326
a2fbb9ea 8327 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8328 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8329 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8330 return -EINVAL;
f1410647 8331 }
a2fbb9ea
ET
8332
8333 /* advertise the requested speed and duplex if supported */
34f80b04 8334 cmd->advertising &= bp->port.supported;
a2fbb9ea 8335
c18487ee
YR
8336 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8337 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8338 bp->port.advertising |= (ADVERTISED_Autoneg |
8339 cmd->advertising);
a2fbb9ea
ET
8340
8341 } else { /* forced speed */
8342 /* advertise the requested speed and duplex if supported */
8343 switch (cmd->speed) {
8344 case SPEED_10:
8345 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8346 if (!(bp->port.supported &
f1410647
ET
8347 SUPPORTED_10baseT_Full)) {
8348 DP(NETIF_MSG_LINK,
8349 "10M full not supported\n");
a2fbb9ea 8350 return -EINVAL;
f1410647 8351 }
a2fbb9ea
ET
8352
8353 advertising = (ADVERTISED_10baseT_Full |
8354 ADVERTISED_TP);
8355 } else {
34f80b04 8356 if (!(bp->port.supported &
f1410647
ET
8357 SUPPORTED_10baseT_Half)) {
8358 DP(NETIF_MSG_LINK,
8359 "10M half not supported\n");
a2fbb9ea 8360 return -EINVAL;
f1410647 8361 }
a2fbb9ea
ET
8362
8363 advertising = (ADVERTISED_10baseT_Half |
8364 ADVERTISED_TP);
8365 }
8366 break;
8367
8368 case SPEED_100:
8369 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8370 if (!(bp->port.supported &
f1410647
ET
8371 SUPPORTED_100baseT_Full)) {
8372 DP(NETIF_MSG_LINK,
8373 "100M full not supported\n");
a2fbb9ea 8374 return -EINVAL;
f1410647 8375 }
a2fbb9ea
ET
8376
8377 advertising = (ADVERTISED_100baseT_Full |
8378 ADVERTISED_TP);
8379 } else {
34f80b04 8380 if (!(bp->port.supported &
f1410647
ET
8381 SUPPORTED_100baseT_Half)) {
8382 DP(NETIF_MSG_LINK,
8383 "100M half not supported\n");
a2fbb9ea 8384 return -EINVAL;
f1410647 8385 }
8386
8387 advertising = (ADVERTISED_100baseT_Half |
8388 ADVERTISED_TP);
8389 }
8390 break;
8391
8392 case SPEED_1000:
8393 if (cmd->duplex != DUPLEX_FULL) {
8394 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8395 return -EINVAL;
f1410647 8396 }
a2fbb9ea 8397
34f80b04 8398 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8399 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8400 return -EINVAL;
f1410647 8401 }
8402
8403 advertising = (ADVERTISED_1000baseT_Full |
8404 ADVERTISED_TP);
8405 break;
8406
8407 case SPEED_2500:
8408 if (cmd->duplex != DUPLEX_FULL) {
8409 DP(NETIF_MSG_LINK,
8410 "2.5G half not supported\n");
a2fbb9ea 8411 return -EINVAL;
f1410647 8412 }
a2fbb9ea 8413
34f80b04 8414 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8415 DP(NETIF_MSG_LINK,
8416 "2.5G full not supported\n");
a2fbb9ea 8417 return -EINVAL;
f1410647 8418 }
a2fbb9ea 8419
f1410647 8420 advertising = (ADVERTISED_2500baseX_Full |
8421 ADVERTISED_TP);
8422 break;
8423
8424 case SPEED_10000:
8425 if (cmd->duplex != DUPLEX_FULL) {
8426 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8427 return -EINVAL;
f1410647 8428 }
a2fbb9ea 8429
34f80b04 8430 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8431 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8432 return -EINVAL;
f1410647 8433 }
8434
8435 advertising = (ADVERTISED_10000baseT_Full |
8436 ADVERTISED_FIBRE);
8437 break;
8438
8439 default:
f1410647 8440 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8441 return -EINVAL;
8442 }
8443
8444 bp->link_params.req_line_speed = cmd->speed;
8445 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8446 bp->port.advertising = advertising;
8447 }
8448
c18487ee 8449 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8450 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8451 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8452 bp->port.advertising);
a2fbb9ea 8453
34f80b04 8454 if (netif_running(dev)) {
bb2a0f7a 8455 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8456 bnx2x_link_set(bp);
8457 }
8458
8459 return 0;
8460}
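An ethtool_cmd reaches bnx2x_set_settings() through the SIOCETHTOOL ioctl. For reference, a minimal user-space sketch of that call path (generic ethtool ABI of this era, not driver code; error handling trimmed), forcing 100M full duplex so the code above takes the forced-speed branch:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* fd is any AF_INET datagram socket, ifname e.g. "eth0" */
static int force_100_full(int fd, const char *ifname)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ecmd;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* read current settings */
		return -1;

	ecmd.cmd = ETHTOOL_SSET;
	ecmd.autoneg = AUTONEG_DISABLE;
	ecmd.speed = SPEED_100;
	ecmd.duplex = DUPLEX_FULL;
	return ioctl(fd, SIOCETHTOOL, &ifr);	/* -EINVAL if unsupported */
}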
8461
8462#define PHY_FW_VER_LEN 10
8463
8464static void bnx2x_get_drvinfo(struct net_device *dev,
8465 struct ethtool_drvinfo *info)
8466{
8467 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8468 u8 phy_fw_ver[PHY_FW_VER_LEN];
8469
8470 strcpy(info->driver, DRV_MODULE_NAME);
8471 strcpy(info->version, DRV_MODULE_VERSION);
8472
8473 phy_fw_ver[0] = '\0';
34f80b04 8474 if (bp->port.pmf) {
4a37fb66 8475 bnx2x_acquire_phy_lock(bp);
8476 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8477 (bp->state != BNX2X_STATE_CLOSED),
8478 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8479 bnx2x_release_phy_lock(bp);
34f80b04 8480 }
c18487ee 8481
8482 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8483 (bp->common.bc_ver & 0xff0000) >> 16,
8484 (bp->common.bc_ver & 0xff00) >> 8,
8485 (bp->common.bc_ver & 0xff),
8486 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8487 strcpy(info->bus_info, pci_name(bp->pdev));
8488 info->n_stats = BNX2X_NUM_STATS;
8489 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8490 info->eedump_len = bp->common.flash_size;
8491 info->regdump_len = 0;
8492}
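The fw_version built above packs the bootcode revision one byte per field; a bc_ver of 0x040200, for instance, prints as "BC:4.2.0". Spelled out:

/* illustration only: decoding the packed 24-bit bootcode version */
u32 bc_ver = 0x040200;
u8 major = (bc_ver & 0xff0000) >> 16;	/* 4 */
u8 minor = (bc_ver & 0xff00) >> 8;	/* 2 */
u8 rev = (bc_ver & 0xff);		/* 0 */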
8493
8494static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8495{
8496 struct bnx2x *bp = netdev_priv(dev);
8497
8498 if (bp->flags & NO_WOL_FLAG) {
8499 wol->supported = 0;
8500 wol->wolopts = 0;
8501 } else {
8502 wol->supported = WAKE_MAGIC;
8503 if (bp->wol)
8504 wol->wolopts = WAKE_MAGIC;
8505 else
8506 wol->wolopts = 0;
8507 }
8508 memset(&wol->sopass, 0, sizeof(wol->sopass));
8509}
8510
8511static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8512{
8513 struct bnx2x *bp = netdev_priv(dev);
8514
8515 if (wol->wolopts & ~WAKE_MAGIC)
8516 return -EINVAL;
8517
8518 if (wol->wolopts & WAKE_MAGIC) {
8519 if (bp->flags & NO_WOL_FLAG)
8520 return -EINVAL;
8521
8522 bp->wol = 1;
34f80b04 8523 } else
a2fbb9ea 8524 bp->wol = 0;
34f80b04 8525
8526 return 0;
8527}
8528
8529static u32 bnx2x_get_msglevel(struct net_device *dev)
8530{
8531 struct bnx2x *bp = netdev_priv(dev);
8532
8533 return bp->msglevel;
8534}
8535
8536static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8537{
8538 struct bnx2x *bp = netdev_priv(dev);
8539
8540 if (capable(CAP_NET_ADMIN))
8541 bp->msglevel = level;
8542}
8543
8544static int bnx2x_nway_reset(struct net_device *dev)
8545{
8546 struct bnx2x *bp = netdev_priv(dev);
8547
8548 if (!bp->port.pmf)
8549 return 0;
a2fbb9ea 8550
34f80b04 8551 if (netif_running(dev)) {
bb2a0f7a 8552 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8553 bnx2x_link_set(bp);
8554 }
8555
8556 return 0;
8557}
8558
8559static int bnx2x_get_eeprom_len(struct net_device *dev)
8560{
8561 struct bnx2x *bp = netdev_priv(dev);
8562
34f80b04 8563 return bp->common.flash_size;
8564}
8565
8566static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8567{
34f80b04 8568 int port = BP_PORT(bp);
8569 int count, i;
8570 u32 val = 0;
8571
8572 /* adjust timeout for emulation/FPGA */
8573 count = NVRAM_TIMEOUT_COUNT;
8574 if (CHIP_REV_IS_SLOW(bp))
8575 count *= 100;
8576
8577 /* request access to nvram interface */
8578 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8579 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8580
8581 for (i = 0; i < count*10; i++) {
8582 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8583 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8584 break;
8585
8586 udelay(5);
8587 }
8588
8589 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8590 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8591 return -EBUSY;
8592 }
8593
8594 return 0;
8595}
8596
8597static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8598{
34f80b04 8599 int port = BP_PORT(bp);
8600 int count, i;
8601 u32 val = 0;
8602
8603 /* adjust timeout for emulation/FPGA */
8604 count = NVRAM_TIMEOUT_COUNT;
8605 if (CHIP_REV_IS_SLOW(bp))
8606 count *= 100;
8607
8608 /* relinquish nvram interface */
8609 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8610 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8611
8612 for (i = 0; i < count*10; i++) {
8613 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8614 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8615 break;
8616
8617 udelay(5);
8618 }
8619
8620 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8621 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8622 return -EBUSY;
8623 }
8624
8625 return 0;
8626}
8627
8628static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8629{
8630 u32 val;
8631
8632 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8633
8634 /* enable both bits, even on read */
8635 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8636 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8637 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8638}
8639
8640static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8641{
8642 u32 val;
8643
8644 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8645
8646 /* disable both bits, even after read */
8647 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8648 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8649 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8650}
8651
4781bfad 8652static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8653 u32 cmd_flags)
8654{
f1410647 8655 int count, i, rc;
8656 u32 val;
8657
8658 /* build the command word */
8659 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8660
8661 /* need to clear DONE bit separately */
8662 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8663
8664 /* address of the NVRAM to read from */
8665 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8666 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8667
8668 /* issue a read command */
8669 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8670
8671 /* adjust timeout for emulation/FPGA */
8672 count = NVRAM_TIMEOUT_COUNT;
8673 if (CHIP_REV_IS_SLOW(bp))
8674 count *= 100;
8675
8676 /* wait for completion */
8677 *ret_val = 0;
8678 rc = -EBUSY;
8679 for (i = 0; i < count; i++) {
8680 udelay(5);
8681 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8682
8683 if (val & MCPR_NVM_COMMAND_DONE) {
8684 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8685 /* we read nvram data in cpu order
8686 * but ethtool sees it as an array of bytes
8687 * converting to big-endian will do the work */
4781bfad 8688 *ret_val = cpu_to_be32(val);
8689 rc = 0;
8690 break;
8691 }
8692 }
8693
8694 return rc;
8695}
8696
8697static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8698 int buf_size)
8699{
8700 int rc;
8701 u32 cmd_flags;
4781bfad 8702 __be32 val;
8703
8704 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8705 DP(BNX2X_MSG_NVM,
c14423fe 8706 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8707 offset, buf_size);
8708 return -EINVAL;
8709 }
8710
8711 if (offset + buf_size > bp->common.flash_size) {
8712 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8713 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8714 offset, buf_size, bp->common.flash_size);
8715 return -EINVAL;
8716 }
8717
8718 /* request access to nvram interface */
8719 rc = bnx2x_acquire_nvram_lock(bp);
8720 if (rc)
8721 return rc;
8722
8723 /* enable access to nvram interface */
8724 bnx2x_enable_nvram_access(bp);
8725
8726 /* read the first word(s) */
8727 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8728 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8729 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8730 memcpy(ret_buf, &val, 4);
8731
8732 /* advance to the next dword */
8733 offset += sizeof(u32);
8734 ret_buf += sizeof(u32);
8735 buf_size -= sizeof(u32);
8736 cmd_flags = 0;
8737 }
8738
8739 if (rc == 0) {
8740 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8741 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8742 memcpy(ret_buf, &val, 4);
8743 }
8744
8745 /* disable access to nvram interface */
8746 bnx2x_disable_nvram_access(bp);
8747 bnx2x_release_nvram_lock(bp);
8748
8749 return rc;
8750}
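The cpu_to_be32() in bnx2x_nvram_read_dword() is what keeps the buffer handed back to ethtool in NVRAM byte order on little-endian hosts. A worked example, assuming (per the comment in that function) that the first NVRAM byte lands in the most significant byte of the register value:

/* assumed NVRAM content at `offset`: the bytes aa bb cc dd */
u32 raw = 0xaabbccdd;		/* REG_RD() result, CPU order */
__be32 wire = cpu_to_be32(raw);	/* LE host memory now: aa bb cc dd */
memcpy(ret_buf, &wire, 4);	/* ethtool sees NVRAM byte order */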
8751
8752static int bnx2x_get_eeprom(struct net_device *dev,
8753 struct ethtool_eeprom *eeprom, u8 *eebuf)
8754{
8755 struct bnx2x *bp = netdev_priv(dev);
8756 int rc;
8757
8758 if (!netif_running(dev))
8759 return -EAGAIN;
8760
34f80b04 8761 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8762 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8763 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8764 eeprom->len, eeprom->len);
8765
8766 /* parameters already validated in ethtool_get_eeprom */
8767
8768 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8769
8770 return rc;
8771}
8772
8773static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8774 u32 cmd_flags)
8775{
f1410647 8776 int count, i, rc;
8777
8778 /* build the command word */
8779 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8780
8781 /* need to clear DONE bit separately */
8782 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8783
8784 /* write the data */
8785 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8786
8787 /* address of the NVRAM to write to */
8788 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8789 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8790
8791 /* issue the write command */
8792 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8793
8794 /* adjust timeout for emulation/FPGA */
8795 count = NVRAM_TIMEOUT_COUNT;
8796 if (CHIP_REV_IS_SLOW(bp))
8797 count *= 100;
8798
8799 /* wait for completion */
8800 rc = -EBUSY;
8801 for (i = 0; i < count; i++) {
8802 udelay(5);
8803 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8804 if (val & MCPR_NVM_COMMAND_DONE) {
8805 rc = 0;
8806 break;
8807 }
8808 }
8809
8810 return rc;
8811}
8812
f1410647 8813#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8814
8815static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8816 int buf_size)
8817{
8818 int rc;
8819 u32 cmd_flags;
8820 u32 align_offset;
4781bfad 8821 __be32 val;
a2fbb9ea 8822
8823 if (offset + buf_size > bp->common.flash_size) {
8824 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8825 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8826 offset, buf_size, bp->common.flash_size);
8827 return -EINVAL;
8828 }
8829
8830 /* request access to nvram interface */
8831 rc = bnx2x_acquire_nvram_lock(bp);
8832 if (rc)
8833 return rc;
8834
8835 /* enable access to nvram interface */
8836 bnx2x_enable_nvram_access(bp);
8837
8838 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8839 align_offset = (offset & ~0x03);
8840 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8841
8842 if (rc == 0) {
8843 val &= ~(0xff << BYTE_OFFSET(offset));
8844 val |= (*data_buf << BYTE_OFFSET(offset));
8845
8846 /* nvram data is returned as an array of bytes
8847 * convert it back to cpu order */
8848 val = be32_to_cpu(val);
8849
8850 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8851 cmd_flags);
8852 }
8853
8854 /* disable access to nvram interface */
8855 bnx2x_disable_nvram_access(bp);
8856 bnx2x_release_nvram_lock(bp);
8857
8858 return rc;
8859}
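A worked pass through this single-byte path, with the be32/cpu conversion elided: writing 0x5a to NVRAM offset 0x102.

u32 offset = 0x102;
u32 align_offset = offset & ~0x03;	/* 0x100: containing dword */
u32 shift = BYTE_OFFSET(offset);	/* 8 * (0x102 & 3) = 16 */
u32 val = 0x11223344;			/* dword read back (assumed) */
val &= ~(0xff << shift);		/* 0x11003344: old byte cleared */
val |= (0x5a << shift);			/* 0x115a3344: new byte spliced */
/* val is then written back to align_offset as a FIRST|LAST burst */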
8860
8861static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8862 int buf_size)
8863{
8864 int rc;
8865 u32 cmd_flags;
8866 u32 val;
8867 u32 written_so_far;
8868
34f80b04 8869 if (buf_size == 1) /* ethtool */
a2fbb9ea 8870 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8871
8872 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8873 DP(BNX2X_MSG_NVM,
c14423fe 8874 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8875 offset, buf_size);
8876 return -EINVAL;
8877 }
8878
8879 if (offset + buf_size > bp->common.flash_size) {
8880 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8881 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8882 offset, buf_size, bp->common.flash_size);
8883 return -EINVAL;
8884 }
8885
8886 /* request access to nvram interface */
8887 rc = bnx2x_acquire_nvram_lock(bp);
8888 if (rc)
8889 return rc;
8890
8891 /* enable access to nvram interface */
8892 bnx2x_enable_nvram_access(bp);
8893
8894 written_so_far = 0;
8895 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8896 while ((written_so_far < buf_size) && (rc == 0)) {
8897 if (written_so_far == (buf_size - sizeof(u32)))
8898 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8899 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8900 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8901 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8902 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8903
8904 memcpy(&val, data_buf, 4);
8905
8906 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8907
8908 /* advance to the next dword */
8909 offset += sizeof(u32);
8910 data_buf += sizeof(u32);
8911 written_so_far += sizeof(u32);
8912 cmd_flags = 0;
8913 }
8914
8915 /* disable access to nvram interface */
8916 bnx2x_disable_nvram_access(bp);
8917 bnx2x_release_nvram_lock(bp);
8918
8919 return rc;
8920}
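The flag juggling in the loop above treats each NVRAM page as its own burst: LAST is set on the final dword of the buffer and on the last dword before every NVRAM_PAGE_SIZE boundary, FIRST on the dword just after one. The same decision folded into one helper, as a readability sketch (the original's else-if ordering is flattened):

static u32 nvram_wr_flags(u32 offset, u32 written, u32 buf_size)
{
	u32 flags = 0;

	if (written == 0 || (offset % NVRAM_PAGE_SIZE) == 0)
		flags |= MCPR_NVM_COMMAND_FIRST;	/* burst start */
	if ((written == buf_size - sizeof(u32)) ||
	    (((offset + 4) % NVRAM_PAGE_SIZE) == 0))
		flags |= MCPR_NVM_COMMAND_LAST;		/* burst end */
	return flags;
}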
8921
8922static int bnx2x_set_eeprom(struct net_device *dev,
8923 struct ethtool_eeprom *eeprom, u8 *eebuf)
8924{
8925 struct bnx2x *bp = netdev_priv(dev);
8926 int rc;
8927
8928 if (!netif_running(dev))
8929 return -EAGAIN;
8930
34f80b04 8931 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8932 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8933 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8934 eeprom->len, eeprom->len);
8935
8936 /* parameters already validated in ethtool_set_eeprom */
8937
c18487ee 8938 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8939 if (eeprom->magic == 0x00504859)
8940 if (bp->port.pmf) {
8941
4a37fb66 8942 bnx2x_acquire_phy_lock(bp);
8943 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8944 bp->link_params.ext_phy_config,
8945 (bp->state != BNX2X_STATE_CLOSED),
8946 eebuf, eeprom->len);
8947 if ((bp->state == BNX2X_STATE_OPEN) ||
8948 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 8949 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 8950 &bp->link_vars, 1);
8951 rc |= bnx2x_phy_init(&bp->link_params,
8952 &bp->link_vars);
bb2a0f7a 8953 }
4a37fb66 8954 bnx2x_release_phy_lock(bp);
8955
8956 } else /* Only the PMF can access the PHY */
8957 return -EINVAL;
8958 else
c18487ee 8959 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8960
8961 return rc;
8962}
8963
8964static int bnx2x_get_coalesce(struct net_device *dev,
8965 struct ethtool_coalesce *coal)
8966{
8967 struct bnx2x *bp = netdev_priv(dev);
8968
8969 memset(coal, 0, sizeof(struct ethtool_coalesce));
8970
8971 coal->rx_coalesce_usecs = bp->rx_ticks;
8972 coal->tx_coalesce_usecs = bp->tx_ticks;
8973
8974 return 0;
8975}
8976
8977static int bnx2x_set_coalesce(struct net_device *dev,
8978 struct ethtool_coalesce *coal)
8979{
8980 struct bnx2x *bp = netdev_priv(dev);
8981
8982 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8983 if (bp->rx_ticks > 3000)
8984 bp->rx_ticks = 3000;
8985
8986 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8987 if (bp->tx_ticks > 0x3000)
8988 bp->tx_ticks = 0x3000;
8989
34f80b04 8990 if (netif_running(dev))
8991 bnx2x_update_coalesce(bp);
8992
8993 return 0;
8994}
8995
8996static void bnx2x_get_ringparam(struct net_device *dev,
8997 struct ethtool_ringparam *ering)
8998{
8999 struct bnx2x *bp = netdev_priv(dev);
9000
9001 ering->rx_max_pending = MAX_RX_AVAIL;
9002 ering->rx_mini_max_pending = 0;
9003 ering->rx_jumbo_max_pending = 0;
9004
9005 ering->rx_pending = bp->rx_ring_size;
9006 ering->rx_mini_pending = 0;
9007 ering->rx_jumbo_pending = 0;
9008
9009 ering->tx_max_pending = MAX_TX_AVAIL;
9010 ering->tx_pending = bp->tx_ring_size;
9011}
9012
9013static int bnx2x_set_ringparam(struct net_device *dev,
9014 struct ethtool_ringparam *ering)
9015{
9016 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9017 int rc = 0;
9018
9019 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9020 (ering->tx_pending > MAX_TX_AVAIL) ||
9021 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9022 return -EINVAL;
9023
9024 bp->rx_ring_size = ering->rx_pending;
9025 bp->tx_ring_size = ering->tx_pending;
9026
9027 if (netif_running(dev)) {
9028 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9029 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9030 }
9031
34f80b04 9032 return rc;
9033}
9034
9035static void bnx2x_get_pauseparam(struct net_device *dev,
9036 struct ethtool_pauseparam *epause)
9037{
9038 struct bnx2x *bp = netdev_priv(dev);
9039
c0700f90 9040 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9041 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9042
9043 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9044 BNX2X_FLOW_CTRL_RX);
9045 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9046 BNX2X_FLOW_CTRL_TX);
9047
9048 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9049 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9050 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9051}
9052
9053static int bnx2x_set_pauseparam(struct net_device *dev,
9054 struct ethtool_pauseparam *epause)
9055{
9056 struct bnx2x *bp = netdev_priv(dev);
9057
9058 if (IS_E1HMF(bp))
9059 return 0;
9060
9061 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9062 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9063 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9064
c0700f90 9065 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9066
f1410647 9067 if (epause->rx_pause)
c0700f90 9068 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9069
f1410647 9070 if (epause->tx_pause)
c0700f90 9071 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9072
9073 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9074 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9075
c18487ee 9076 if (epause->autoneg) {
34f80b04 9077 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9078 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9079 return -EINVAL;
9080 }
a2fbb9ea 9081
c18487ee 9082 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9083 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9084 }
a2fbb9ea 9085
9086 DP(NETIF_MSG_LINK,
9087 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9088
9089 if (netif_running(dev)) {
bb2a0f7a 9090 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9091 bnx2x_link_set(bp);
9092 }
9093
9094 return 0;
9095}
9096
9097static int bnx2x_set_flags(struct net_device *dev, u32 data)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
9100 int changed = 0;
9101 int rc = 0;
9102
9103 /* TPA requires Rx CSUM offloading */
9104 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9105 if (!(dev->features & NETIF_F_LRO)) {
9106 dev->features |= NETIF_F_LRO;
9107 bp->flags |= TPA_ENABLE_FLAG;
9108 changed = 1;
9109 }
9110
9111 } else if (dev->features & NETIF_F_LRO) {
9112 dev->features &= ~NETIF_F_LRO;
9113 bp->flags &= ~TPA_ENABLE_FLAG;
9114 changed = 1;
9115 }
9116
9117 if (changed && netif_running(dev)) {
9118 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9119 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9120 }
9121
9122 return rc;
9123}
9124
9125static u32 bnx2x_get_rx_csum(struct net_device *dev)
9126{
9127 struct bnx2x *bp = netdev_priv(dev);
9128
9129 return bp->rx_csum;
9130}
9131
9132static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9133{
9134 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9135 int rc = 0;
9136
9137 bp->rx_csum = data;
9138
9139 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9140 TPA'ed packets will be discarded due to wrong TCP CSUM */
9141 if (!data) {
9142 u32 flags = ethtool_op_get_flags(dev);
9143
9144 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9145 }
9146
9147 return rc;
9148}
9149
9150static int bnx2x_set_tso(struct net_device *dev, u32 data)
9151{
755735eb 9152 if (data) {
a2fbb9ea 9153 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9154 dev->features |= NETIF_F_TSO6;
9155 } else {
a2fbb9ea 9156 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9157 dev->features &= ~NETIF_F_TSO6;
9158 }
9159
9160 return 0;
9161}
9162
f3c87cdd 9163static const struct {
9164 char string[ETH_GSTRING_LEN];
9165} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9166 { "register_test (offline)" },
9167 { "memory_test (offline)" },
9168 { "loopback_test (offline)" },
9169 { "nvram_test (online)" },
9170 { "interrupt_test (online)" },
9171 { "link_test (online)" },
d3d4f495 9172 { "idle check (online)" }
9173};
9174
9175static int bnx2x_self_test_count(struct net_device *dev)
9176{
9177 return BNX2X_NUM_TESTS;
9178}
9179
9180static int bnx2x_test_registers(struct bnx2x *bp)
9181{
9182 int idx, i, rc = -ENODEV;
9183 u32 wr_val = 0;
9dabc424 9184 int port = BP_PORT(bp);
9185 static const struct {
9186 u32 offset0;
9187 u32 offset1;
9188 u32 mask;
9189 } reg_tbl[] = {
9190/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9191 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9192 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9193 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9194 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9195 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9196 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9197 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9198 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9199 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9200/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9201 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9202 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9203 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9204 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9205 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9206 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9207 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9208 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9209 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9210/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9211 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9212 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9213 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9214 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9215 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9216 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9217 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9218 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9219 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9220/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9221 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9222 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9223 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9224 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9225 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9226 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9227 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9228
9229 { 0xffffffff, 0, 0x00000000 }
9230 };
9231
9232 if (!netif_running(bp->dev))
9233 return rc;
9234
9235 /* Repeat the test twice:
9236 First by writing 0x00000000, second by writing 0xffffffff */
9237 for (idx = 0; idx < 2; idx++) {
9238
9239 switch (idx) {
9240 case 0:
9241 wr_val = 0;
9242 break;
9243 case 1:
9244 wr_val = 0xffffffff;
9245 break;
9246 }
9247
9248 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9249 u32 offset, mask, save_val, val;
9250
9251 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9252 mask = reg_tbl[i].mask;
9253
9254 save_val = REG_RD(bp, offset);
9255
9256 REG_WR(bp, offset, wr_val);
9257 val = REG_RD(bp, offset);
9258
9259 /* Restore the original register's value */
9260 REG_WR(bp, offset, save_val);
9261
9262 /* verify that the value read back matches what was written */
9263 if ((val & mask) != (wr_val & mask))
9264 goto test_reg_exit;
9265 }
9266 }
9267
9268 rc = 0;
9269
9270test_reg_exit:
9271 return rc;
9272}
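Each entry's offset1 above is the stride between the port 0 and port 1 instances of a register, so one table serves both ports (offset = offset0 + port*offset1). The probe done inside the loop, isolated as a sketch:

/* write a pattern, read it back through the mask, and always
 * restore the original value, exactly as the walk above does */
static bool reg_probe_ok(struct bnx2x *bp, u32 off, u32 mask, u32 pattern)
{
	u32 saved = REG_RD(bp, off);
	u32 val;

	REG_WR(bp, off, pattern);
	val = REG_RD(bp, off);
	REG_WR(bp, off, saved);		/* leave the chip as found */

	return (val & mask) == (pattern & mask);
}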
9273
9274static int bnx2x_test_memory(struct bnx2x *bp)
9275{
9276 int i, j, rc = -ENODEV;
9277 u32 val;
9278 static const struct {
9279 u32 offset;
9280 int size;
9281 } mem_tbl[] = {
9282 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9283 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9284 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9285 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9286 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9287 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9288 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9289
9290 { 0xffffffff, 0 }
9291 };
9292 static const struct {
9293 char *name;
9294 u32 offset;
9295 u32 e1_mask;
9296 u32 e1h_mask;
f3c87cdd 9297 } prty_tbl[] = {
9298 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9299 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9300 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9301 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9302 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9303 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9304
9305 { NULL, 0xffffffff, 0, 0 }
9306 };
9307
9308 if (!netif_running(bp->dev))
9309 return rc;
9310
9311 /* Go through all the memories */
9312 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9313 for (j = 0; j < mem_tbl[i].size; j++)
9314 REG_RD(bp, mem_tbl[i].offset + j*4);
9315
9316 /* Check the parity status */
9317 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9318 val = REG_RD(bp, prty_tbl[i].offset);
9319 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9320 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9321 DP(NETIF_MSG_HW,
9322 "%s is 0x%x\n", prty_tbl[i].name, val);
9323 goto test_mem_exit;
9324 }
9325 }
9326
9327 rc = 0;
9328
9329test_mem_exit:
9330 return rc;
9331}
9332
9333static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9334{
9335 int cnt = 1000;
9336
9337 if (link_up)
9338 while (bnx2x_link_test(bp) && cnt--)
9339 msleep(10);
9340}
9341
9342static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9343{
9344 unsigned int pkt_size, num_pkts, i;
9345 struct sk_buff *skb;
9346 unsigned char *packet;
9347 struct bnx2x_fastpath *fp = &bp->fp[0];
9348 u16 tx_start_idx, tx_idx;
9349 u16 rx_start_idx, rx_idx;
9350 u16 pkt_prod;
9351 struct sw_tx_bd *tx_buf;
9352 struct eth_tx_bd *tx_bd;
9353 dma_addr_t mapping;
9354 union eth_rx_cqe *cqe;
9355 u8 cqe_fp_flags;
9356 struct sw_rx_bd *rx_buf;
9357 u16 len;
9358 int rc = -ENODEV;
9359
9360 /* check the loopback mode */
9361 switch (loopback_mode) {
9362 case BNX2X_PHY_LOOPBACK:
9363 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9364 return -EINVAL;
9365 break;
9366 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9367 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9368 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9369 break;
9370 default:
f3c87cdd 9371 return -EINVAL;
b5bf9068 9372 }
f3c87cdd 9373
9374 /* prepare the loopback packet */
9375 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9376 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9377 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9378 if (!skb) {
9379 rc = -ENOMEM;
9380 goto test_loopback_exit;
9381 }
9382 packet = skb_put(skb, pkt_size);
9383 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9384 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9385 for (i = ETH_HLEN; i < pkt_size; i++)
9386 packet[i] = (unsigned char) (i & 0xff);
9387
b5bf9068 9388 /* send the loopback packet */
9389 num_pkts = 0;
9390 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9391 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9392
9393 pkt_prod = fp->tx_pkt_prod++;
9394 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9395 tx_buf->first_bd = fp->tx_bd_prod;
9396 tx_buf->skb = skb;
9397
9398 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9399 mapping = pci_map_single(bp->pdev, skb->data,
9400 skb_headlen(skb), PCI_DMA_TODEVICE);
9401 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9402 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9403 tx_bd->nbd = cpu_to_le16(1);
9404 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9405 tx_bd->vlan = cpu_to_le16(pkt_prod);
9406 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9407 ETH_TX_BD_FLAGS_END_BD);
9408 tx_bd->general_data = ((UNICAST_ADDRESS <<
9409 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9410
9411 wmb();
9412
4781bfad 9413 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9414 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9415 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9416 DOORBELL(bp, fp->index, 0);
9417
9418 mmiowb();
9419
9420 num_pkts++;
9421 fp->tx_bd_prod++;
9422 bp->dev->trans_start = jiffies;
9423
9424 udelay(100);
9425
9426 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9427 if (tx_idx != tx_start_idx + num_pkts)
9428 goto test_loopback_exit;
9429
9430 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9431 if (rx_idx != rx_start_idx + num_pkts)
9432 goto test_loopback_exit;
9433
9434 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9435 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9436 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9437 goto test_loopback_rx_exit;
9438
9439 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9440 if (len != pkt_size)
9441 goto test_loopback_rx_exit;
9442
9443 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9444 skb = rx_buf->skb;
9445 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9446 for (i = ETH_HLEN; i < pkt_size; i++)
9447 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9448 goto test_loopback_rx_exit;
9449
9450 rc = 0;
9451
9452test_loopback_rx_exit:
9453
9454 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9455 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9456 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9457 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9458
9459 /* Update producers */
9460 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9461 fp->rx_sge_prod);
9462
9463test_loopback_exit:
9464 bp->link_params.loopback_mode = LOOPBACK_NONE;
9465
9466 return rc;
9467}
9468
9469static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9470{
b5bf9068 9471 int rc = 0, res;
9472
9473 if (!netif_running(bp->dev))
9474 return BNX2X_LOOPBACK_FAILED;
9475
f8ef6e44 9476 bnx2x_netif_stop(bp, 1);
3910c8ae 9477 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9478
9479 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9480 if (res) {
9481 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9482 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9483 }
9484
9485 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9486 if (res) {
9487 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9488 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9489 }
9490
3910c8ae 9491 bnx2x_release_phy_lock(bp);
9492 bnx2x_netif_start(bp);
9493
9494 return rc;
9495}
9496
9497#define CRC32_RESIDUAL 0xdebb20e3
9498
9499static int bnx2x_test_nvram(struct bnx2x *bp)
9500{
9501 static const struct {
9502 int offset;
9503 int size;
9504 } nvram_tbl[] = {
9505 { 0, 0x14 }, /* bootstrap */
9506 { 0x14, 0xec }, /* dir */
9507 { 0x100, 0x350 }, /* manuf_info */
9508 { 0x450, 0xf0 }, /* feature_info */
9509 { 0x640, 0x64 }, /* upgrade_key_info */
9510 { 0x6a4, 0x64 },
9511 { 0x708, 0x70 }, /* manuf_key_info */
9512 { 0x778, 0x70 },
9513 { 0, 0 }
9514 };
4781bfad 9515 __be32 buf[0x350 / 4];
9516 u8 *data = (u8 *)buf;
9517 int i, rc;
9518 u32 magic, csum;
9519
9520 rc = bnx2x_nvram_read(bp, 0, data, 4);
9521 if (rc) {
9522 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9523 goto test_nvram_exit;
9524 }
9525
9526 magic = be32_to_cpu(buf[0]);
9527 if (magic != 0x669955aa) {
9528 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9529 rc = -ENODEV;
9530 goto test_nvram_exit;
9531 }
9532
9533 for (i = 0; nvram_tbl[i].size; i++) {
9534
9535 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9536 nvram_tbl[i].size);
9537 if (rc) {
9538 DP(NETIF_MSG_PROBE,
9539 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9540 goto test_nvram_exit;
9541 }
9542
9543 csum = ether_crc_le(nvram_tbl[i].size, data);
9544 if (csum != CRC32_RESIDUAL) {
9545 DP(NETIF_MSG_PROBE,
9546 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9547 rc = -ENODEV;
9548 goto test_nvram_exit;
9549 }
9550 }
9551
9552test_nvram_exit:
9553 return rc;
9554}
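Each nvram_tbl region stores its own CRC32 in its final four bytes, which is why one constant checks them all: a CRC run over data-plus-appended-CRC of an intact block always lands on a fixed residual. A user-space illustration with zlib, assuming zlib's crc32() conventions (it differs from ether_crc_le() only in the final bit inversion, hence 0x2144df1c here versus ~0x2144df1c == 0xdebb20e3 above):

#include <stdint.h>
#include <stddef.h>
#include <zlib.h>

/* true for any buffer whose little-endian CRC32 is appended */
static int region_intact(const uint8_t *buf, size_t len_with_crc)
{
	return crc32(0L, buf, len_with_crc) == 0x2144df1cUL;
}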
9555
9556static int bnx2x_test_intr(struct bnx2x *bp)
9557{
9558 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9559 int i, rc;
9560
9561 if (!netif_running(bp->dev))
9562 return -ENODEV;
9563
8d9c5f34 9564 config->hdr.length = 0;
9565 if (CHIP_IS_E1(bp))
9566 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9567 else
9568 config->hdr.offset = BP_FUNC(bp);
0626b899 9569 config->hdr.client_id = bp->fp->cl_id;
9570 config->hdr.reserved1 = 0;
9571
9572 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9573 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9574 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9575 if (rc == 0) {
9576 bp->set_mac_pending++;
9577 for (i = 0; i < 10; i++) {
9578 if (!bp->set_mac_pending)
9579 break;
9580 msleep_interruptible(10);
9581 }
9582 if (i == 10)
9583 rc = -ENODEV;
9584 }
9585
9586 return rc;
9587}
9588
9589static void bnx2x_self_test(struct net_device *dev,
9590 struct ethtool_test *etest, u64 *buf)
9591{
9592 struct bnx2x *bp = netdev_priv(dev);
9593
9594 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9595
f3c87cdd 9596 if (!netif_running(dev))
a2fbb9ea 9597 return;
a2fbb9ea 9598
33471629 9599 /* offline tests are not supported in MF mode */
9600 if (IS_E1HMF(bp))
9601 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9602
9603 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9604 u8 link_up;
9605
9606 link_up = bp->link_vars.link_up;
9607 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9608 bnx2x_nic_load(bp, LOAD_DIAG);
9609 /* wait until link state is restored */
9610 bnx2x_wait_for_link(bp, link_up);
9611
9612 if (bnx2x_test_registers(bp) != 0) {
9613 buf[0] = 1;
9614 etest->flags |= ETH_TEST_FL_FAILED;
9615 }
9616 if (bnx2x_test_memory(bp) != 0) {
9617 buf[1] = 1;
9618 etest->flags |= ETH_TEST_FL_FAILED;
9619 }
9620 buf[2] = bnx2x_test_loopback(bp, link_up);
9621 if (buf[2] != 0)
9622 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9623
9624 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9625 bnx2x_nic_load(bp, LOAD_NORMAL);
9626 /* wait until link state is restored */
9627 bnx2x_wait_for_link(bp, link_up);
9628 }
9629 if (bnx2x_test_nvram(bp) != 0) {
9630 buf[3] = 1;
9631 etest->flags |= ETH_TEST_FL_FAILED;
9632 }
9633 if (bnx2x_test_intr(bp) != 0) {
9634 buf[4] = 1;
9635 etest->flags |= ETH_TEST_FL_FAILED;
9636 }
9637 if (bp->port.pmf)
9638 if (bnx2x_link_test(bp) != 0) {
9639 buf[5] = 1;
9640 etest->flags |= ETH_TEST_FL_FAILED;
9641 }
9642
9643#ifdef BNX2X_EXTRA_DEBUG
9644 bnx2x_panic_dump(bp);
9645#endif
9646}
9647
9648static const struct {
9649 long offset;
9650 int size;
9651 u8 string[ETH_GSTRING_LEN];
9652} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9653/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9654 { Q_STATS_OFFSET32(error_bytes_received_hi),
9655 8, "[%d]: rx_error_bytes" },
9656 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9657 8, "[%d]: rx_ucast_packets" },
9658 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9659 8, "[%d]: rx_mcast_packets" },
9660 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9661 8, "[%d]: rx_bcast_packets" },
9662 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9663 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9664 4, "[%d]: rx_phy_ip_err_discards"},
9665 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9666 4, "[%d]: rx_skb_alloc_discard" },
9667 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9668
9669/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9670 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9671 8, "[%d]: tx_packets" }
9672};
9673
9674static const struct {
9675 long offset;
9676 int size;
9677 u32 flags;
9678#define STATS_FLAGS_PORT 1
9679#define STATS_FLAGS_FUNC 2
de832a55 9680#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9681 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9682} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9683/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9684 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9685 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9686 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9687 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9688 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9689 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9690 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9691 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9692 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9693 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9694 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9695 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9696 8, STATS_FLAGS_PORT, "rx_align_errors" },
9697 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9698 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9699 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9700 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9701/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9702 8, STATS_FLAGS_PORT, "rx_fragments" },
9703 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9704 8, STATS_FLAGS_PORT, "rx_jabbers" },
9705 { STATS_OFFSET32(no_buff_discard_hi),
9706 8, STATS_FLAGS_BOTH, "rx_discards" },
9707 { STATS_OFFSET32(mac_filter_discard),
9708 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9709 { STATS_OFFSET32(xxoverflow_discard),
9710 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9711 { STATS_OFFSET32(brb_drop_hi),
9712 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9713 { STATS_OFFSET32(brb_truncate_hi),
9714 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9715 { STATS_OFFSET32(pause_frames_received_hi),
9716 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9717 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9718 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9719 { STATS_OFFSET32(nig_timer_max),
9720 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9721/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9722 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9723 { STATS_OFFSET32(rx_skb_alloc_failed),
9724 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9725 { STATS_OFFSET32(hw_csum_err),
9726 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9727
9728 { STATS_OFFSET32(total_bytes_transmitted_hi),
9729 8, STATS_FLAGS_BOTH, "tx_bytes" },
9730 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9731 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9732 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9733 8, STATS_FLAGS_BOTH, "tx_packets" },
9734 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9735 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9736 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9737 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9738 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9739 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9740 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9741 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9742/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9743 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9744 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9745 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9746 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9747 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9748 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9749 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9750 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9751 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9752 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9753 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9754 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9755 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9756 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9757 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9758 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9759 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9760 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9761 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9762/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9763 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9764 { STATS_OFFSET32(pause_frames_sent_hi),
9765 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9766};
9767
9768#define IS_PORT_STAT(i) \
9769 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9770#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9771#define IS_E1HMF_MODE_STAT(bp) \
9772 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9773
9774static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9775{
bb2a0f7a 9776 struct bnx2x *bp = netdev_priv(dev);
de832a55 9777 int i, j, k;
bb2a0f7a 9778
9779 switch (stringset) {
9780 case ETH_SS_STATS:
9781 if (is_multi(bp)) {
9782 k = 0;
9783 for_each_queue(bp, i) {
9784 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9785 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9786 bnx2x_q_stats_arr[j].string, i);
9787 k += BNX2X_NUM_Q_STATS;
9788 }
9789 if (IS_E1HMF_MODE_STAT(bp))
9790 break;
9791 for (j = 0; j < BNX2X_NUM_STATS; j++)
9792 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9793 bnx2x_stats_arr[j].string);
9794 } else {
9795 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9796 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9797 continue;
9798 strcpy(buf + j*ETH_GSTRING_LEN,
9799 bnx2x_stats_arr[i].string);
9800 j++;
9801 }
bb2a0f7a 9802 }
9803 break;
9804
9805 case ETH_SS_TEST:
9806 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9807 break;
9808 }
9809}
9810
9811static int bnx2x_get_stats_count(struct net_device *dev)
9812{
bb2a0f7a 9813 struct bnx2x *bp = netdev_priv(dev);
de832a55 9814 int i, num_stats;
bb2a0f7a 9815
9816 if (is_multi(bp)) {
9817 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9818 if (!IS_E1HMF_MODE_STAT(bp))
9819 num_stats += BNX2X_NUM_STATS;
9820 } else {
9821 if (IS_E1HMF_MODE_STAT(bp)) {
9822 num_stats = 0;
9823 for (i = 0; i < BNX2X_NUM_STATS; i++)
9824 if (IS_FUNC_STAT(i))
9825 num_stats++;
9826 } else
9827 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9828 }
de832a55 9829
bb2a0f7a 9830 return num_stats;
9831}
9832
9833static void bnx2x_get_ethtool_stats(struct net_device *dev,
9834 struct ethtool_stats *stats, u64 *buf)
9835{
9836 struct bnx2x *bp = netdev_priv(dev);
9837 u32 *hw_stats, *offset;
9838 int i, j, k;
bb2a0f7a 9839
9840 if (is_multi(bp)) {
9841 k = 0;
9842 for_each_queue(bp, i) {
9843 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9844 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9845 if (bnx2x_q_stats_arr[j].size == 0) {
9846 /* skip this counter */
9847 buf[k + j] = 0;
9848 continue;
9849 }
9850 offset = (hw_stats +
9851 bnx2x_q_stats_arr[j].offset);
9852 if (bnx2x_q_stats_arr[j].size == 4) {
9853 /* 4-byte counter */
9854 buf[k + j] = (u64) *offset;
9855 continue;
9856 }
9857 /* 8-byte counter */
9858 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9859 }
9860 k += BNX2X_NUM_Q_STATS;
9861 }
9862 if (IS_E1HMF_MODE_STAT(bp))
9863 return;
9864 hw_stats = (u32 *)&bp->eth_stats;
9865 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9866 if (bnx2x_stats_arr[j].size == 0) {
9867 /* skip this counter */
9868 buf[k + j] = 0;
9869 continue;
9870 }
9871 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9872 if (bnx2x_stats_arr[j].size == 4) {
9873 /* 4-byte counter */
9874 buf[k + j] = (u64) *offset;
9875 continue;
9876 }
9877 /* 8-byte counter */
9878 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9879 }
9880 } else {
9881 hw_stats = (u32 *)&bp->eth_stats;
9882 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9883 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9884 continue;
9885 if (bnx2x_stats_arr[i].size == 0) {
9886 /* skip this counter */
9887 buf[j] = 0;
9888 j++;
9889 continue;
9890 }
9891 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9892 if (bnx2x_stats_arr[i].size == 4) {
9893 /* 4-byte counter */
9894 buf[j] = (u64) *offset;
9895 j++;
9896 continue;
9897 }
9898 /* 8-byte counter */
9899 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9900 j++;
a2fbb9ea 9901 }
9902 }
9903}
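The size-8 entries are 64-bit counters stored as two consecutive u32s, most significant word first; HILO_U64 merges the pair. In isolation:

/* what the 8-byte branch above computes */
u32 hi = *offset, lo = *(offset + 1);
u64 counter = ((u64)hi << 32) | lo;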
9904
9905static int bnx2x_phys_id(struct net_device *dev, u32 data)
9906{
9907 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9908 int port = BP_PORT(bp);
9909 int i;
9910
9911 if (!netif_running(dev))
9912 return 0;
9913
9914 if (!bp->port.pmf)
9915 return 0;
9916
9917 if (data == 0)
9918 data = 2;
9919
9920 for (i = 0; i < (data * 2); i++) {
c18487ee 9921 if ((i % 2) == 0)
34f80b04 9922 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9923 bp->link_params.hw_led_mode,
9924 bp->link_params.chip_id);
9925 else
34f80b04 9926 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9927 bp->link_params.hw_led_mode,
9928 bp->link_params.chip_id);
9929
9930 msleep_interruptible(500);
9931 if (signal_pending(current))
9932 break;
9933 }
9934
c18487ee 9935 if (bp->link_vars.link_up)
34f80b04 9936 bnx2x_set_led(bp, port, LED_MODE_OPER,
9937 bp->link_vars.line_speed,
9938 bp->link_params.hw_led_mode,
9939 bp->link_params.chip_id);
9940
9941 return 0;
9942}
9943
9944static struct ethtool_ops bnx2x_ethtool_ops = {
9945 .get_settings = bnx2x_get_settings,
9946 .set_settings = bnx2x_set_settings,
9947 .get_drvinfo = bnx2x_get_drvinfo,
9948 .get_wol = bnx2x_get_wol,
9949 .set_wol = bnx2x_set_wol,
9950 .get_msglevel = bnx2x_get_msglevel,
9951 .set_msglevel = bnx2x_set_msglevel,
9952 .nway_reset = bnx2x_nway_reset,
9953 .get_link = ethtool_op_get_link,
9954 .get_eeprom_len = bnx2x_get_eeprom_len,
9955 .get_eeprom = bnx2x_get_eeprom,
9956 .set_eeprom = bnx2x_set_eeprom,
9957 .get_coalesce = bnx2x_get_coalesce,
9958 .set_coalesce = bnx2x_set_coalesce,
9959 .get_ringparam = bnx2x_get_ringparam,
9960 .set_ringparam = bnx2x_set_ringparam,
9961 .get_pauseparam = bnx2x_get_pauseparam,
9962 .set_pauseparam = bnx2x_set_pauseparam,
9963 .get_rx_csum = bnx2x_get_rx_csum,
9964 .set_rx_csum = bnx2x_set_rx_csum,
9965 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9966 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9967 .set_flags = bnx2x_set_flags,
9968 .get_flags = ethtool_op_get_flags,
9969 .get_sg = ethtool_op_get_sg,
9970 .set_sg = ethtool_op_set_sg,
9971 .get_tso = ethtool_op_get_tso,
9972 .set_tso = bnx2x_set_tso,
9973 .self_test_count = bnx2x_self_test_count,
9974 .self_test = bnx2x_self_test,
9975 .get_strings = bnx2x_get_strings,
9976 .phys_id = bnx2x_phys_id,
9977 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9978 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9979};
9980
9981/* end of ethtool_ops */
9982
9983/****************************************************************************
9984* General service functions
9985****************************************************************************/
9986
9987static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9988{
9989 u16 pmcsr;
9990
9991 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9992
9993 switch (state) {
9994 case PCI_D0:
34f80b04 9995 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9996 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9997 PCI_PM_CTRL_PME_STATUS));
9998
9999 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10000 /* delay required during transition out of D3hot */
a2fbb9ea 10001 msleep(20);
34f80b04 10002 break;
a2fbb9ea 10003
10004 case PCI_D3hot:
10005 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10006 pmcsr |= 3;
a2fbb9ea 10007
10008 if (bp->wol)
10009 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10010
10011 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10012 pmcsr);
a2fbb9ea 10013
10014 /* No more memory access after this point until
10015 * device is brought back to D0.
10016 */
10017 break;
10018
10019 default:
10020 return -EINVAL;
10021 }
10022 return 0;
10023}
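In PCI PM terms, the D3hot branch sets PowerState (PMCSR bits 1:0) to 3 and, when WoL is armed, PME_En so the device can signal a wake event from D3hot. The resulting register value, sketched with a hypothetical wol flag:

u16 pmcsr = (old_pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3;	/* D3hot */
if (wol)						/* hypothetical */
	pmcsr |= PCI_PM_CTRL_PME_ENABLE;		/* allow PME# wake */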
10024
10025static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10026{
10027 u16 rx_cons_sb;
10028
10029 /* Tell compiler that status block fields can change */
10030 barrier();
10031 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10032 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10033 rx_cons_sb++;
10034 return (fp->rx_comp_cons != rx_cons_sb);
10035}
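The increment looks odd but covers the ring layout: the last element of each completion-queue page is reserved as a link to the next page, so a hardware index whose low bits are all ones can never name a real completion and is stepped past before the comparison. With an assumed 128-entry page (MAX_RCQ_DESC_CNT = 127):

u16 idx = 127;		/* assumed: lands on a page's link slot */
if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
	idx++;		/* 128: first real entry of the next page */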
10036
10037/*
10038 * net_device service functions
10039 */
10040
10041static int bnx2x_poll(struct napi_struct *napi, int budget)
10042{
10043 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10044 napi);
10045 struct bnx2x *bp = fp->bp;
10046 int work_done = 0;
10047
10048#ifdef BNX2X_STOP_ON_ERROR
10049 if (unlikely(bp->panic))
34f80b04 10050 goto poll_panic;
10051#endif
10052
10053 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10054 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10055 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10056
10057 bnx2x_update_fpsb_idx(fp);
10058
237907c1 10059 if (bnx2x_has_tx_work(fp))
10060 bnx2x_tx_int(fp, budget);
10061
237907c1 10062 if (bnx2x_has_rx_work(fp))
a2fbb9ea 10063 work_done = bnx2x_rx_int(fp, budget);
da5a662a 10064 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10065
10066 /* must not complete if we consumed full budget */
da5a662a 10067 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10068
10069#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10070poll_panic:
a2fbb9ea 10071#endif
288379f0 10072 napi_complete(napi);
a2fbb9ea 10073
0626b899 10074 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10075 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10076 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10077 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10078 }
10079 return work_done;
10080}
10081
10082
10083/* we split the first BD into headers and data BDs
33471629 10084 * to ease the pain of our fellow microcode engineers
10085 * we use one mapping for both BDs
10086 * So far this has only been observed to happen
10087 * in Other Operating Systems(TM)
10088 */
10089static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10090 struct bnx2x_fastpath *fp,
10091 struct eth_tx_bd **tx_bd, u16 hlen,
10092 u16 bd_prod, int nbd)
10093{
10094 struct eth_tx_bd *h_tx_bd = *tx_bd;
10095 struct eth_tx_bd *d_tx_bd;
10096 dma_addr_t mapping;
10097 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10098
10099 /* first fix first BD */
10100 h_tx_bd->nbd = cpu_to_le16(nbd);
10101 h_tx_bd->nbytes = cpu_to_le16(hlen);
10102
10103 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10104 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10105 h_tx_bd->addr_lo, h_tx_bd->nbd);
10106
10107 /* now get a new data BD
10108 * (after the pbd) and fill it */
10109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10110 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10111
10112 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10113 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10114
10115 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10116 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10117 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10118 d_tx_bd->vlan = 0;
10119 /* this marks the BD as one that has no individual mapping;
10120 * the FW ignores this flag in a BD not marked start
10121 */
10122 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10123 DP(NETIF_MSG_TX_QUEUED,
10124 "TSO split data size is %d (%x:%x)\n",
10125 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10126
10127 /* update tx_bd for marking the last BD flag */
10128 *tx_bd = d_tx_bd;
10129
10130 return bd_prod;
10131}
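A worked split with assumed sizes, skb_headlen() = 1514 and hlen = 66 (Ethernet + IP + TCP headers):

u16 hlen = 66, headlen = 1514;
/* the header BD keeps nbytes = 66 at `mapping`; the new data BD
 * gets nbytes = 1514 - 66 = 1448 at `mapping + 66`.  Both point
 * into the one DMA mapping, hence ETH_TX_BD_FLAGS_SW_LSO on the
 * data BD instead of a mapping of its own: the region is unmapped
 * only once, via the BD marked start. */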
10132
10133static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10134{
10135 if (fix > 0)
10136 csum = (u16) ~csum_fold(csum_sub(csum,
10137 csum_partial(t_header - fix, fix, 0)));
10138
10139 else if (fix < 0)
10140 csum = (u16) ~csum_fold(csum_add(csum,
10141 csum_partial(t_header, -fix, 0)));
10142
10143 return swab16(csum);
10144}
10145
10146static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10147{
10148 u32 rc;
10149
10150 if (skb->ip_summed != CHECKSUM_PARTIAL)
10151 rc = XMIT_PLAIN;
10152
10153 else {
4781bfad 10154 if (skb->protocol == htons(ETH_P_IPV6)) {
10155 rc = XMIT_CSUM_V6;
10156 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10157 rc |= XMIT_CSUM_TCP;
10158
10159 } else {
10160 rc = XMIT_CSUM_V4;
10161 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10162 rc |= XMIT_CSUM_TCP;
10163 }
10164 }
10165
10166 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10167 rc |= XMIT_GSO_V4;
10168
10169 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10170 rc |= XMIT_GSO_V6;
10171
10172 return rc;
10173}
10174
632da4d6 10175#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10176/* check if packet requires linearization (packet is too fragmented) */
10177static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10178 u32 xmit_type)
10179{
10180 int to_copy = 0;
10181 int hlen = 0;
10182 int first_bd_sz = 0;
10183
10184 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10185 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10186
10187 if (xmit_type & XMIT_GSO) {
10188 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10189 /* Check if LSO packet needs to be copied:
10190 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10191 int wnd_size = MAX_FETCH_BD - 3;
33471629 10192 /* Number of windows to check */
755735eb
EG
10193 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10194 int wnd_idx = 0;
10195 int frag_idx = 0;
10196 u32 wnd_sum = 0;
10197
10198 /* Headers length */
10199 hlen = (int)(skb_transport_header(skb) - skb->data) +
10200 tcp_hdrlen(skb);
10201
10202 /* Amount of data (w/o headers) on linear part of SKB*/
10203 first_bd_sz = skb_headlen(skb) - hlen;
10204
10205 wnd_sum = first_bd_sz;
10206
10207 /* Calculate the first sum - it's special */
10208 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10209 wnd_sum +=
10210 skb_shinfo(skb)->frags[frag_idx].size;
10211
10212 /* If there was data on linear skb data - check it */
10213 if (first_bd_sz > 0) {
10214 if (unlikely(wnd_sum < lso_mss)) {
10215 to_copy = 1;
10216 goto exit_lbl;
10217 }
10218
10219 wnd_sum -= first_bd_sz;
10220 }
10221
10222 /* Others are easier: run through the frag list and
10223 check all windows */
10224 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10225 wnd_sum +=
10226 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10227
10228 if (unlikely(wnd_sum < lso_mss)) {
10229 to_copy = 1;
10230 break;
10231 }
10232 wnd_sum -=
10233 skb_shinfo(skb)->frags[wnd_idx].size;
10234 }
10235
10236 } else {
10237 /* in non-LSO too fragmented packet should always
10238 be linearized */
10239 to_copy = 1;
10240 }
10241 }
10242
10243exit_lbl:
10244 if (unlikely(to_copy))
10245 DP(NETIF_MSG_TX_QUEUED,
10246 "Linearization IS REQUIRED for %s packet. "
10247 "num_frags %d hlen %d first_bd_sz %d\n",
10248 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10249 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10250
10251 return to_copy;
10252}
632da4d6 10253#endif
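
/* Editor's note: a compact model (#if 0, not compiled) of the sliding-window
 * test above: every wnd_size consecutive BDs must together carry at least
 * one full MSS, otherwise the skb has to be linearized.  frag_size[] and
 * needs_linearization() are hypothetical stand-ins for the skb fields.
 */
#if 0
static int needs_linearization(const int *frag_size, int nr_frags,
			       int first_bd_sz, int wnd_size, int lso_mss)
{
	int wnd_sum = first_bd_sz;
	int i;

	/* first window: linear data plus the first wnd_size - 1 frags */
	for (i = 0; i < wnd_size - 1 && i < nr_frags; i++)
		wnd_sum += frag_size[i];
	if (first_bd_sz > 0) {
		if (wnd_sum < lso_mss)
			return 1;
		wnd_sum -= first_bd_sz;
	}

	/* slide the window one frag at a time */
	for (i = 0; i + wnd_size - 1 < nr_frags; i++) {
		wnd_sum += frag_size[i + wnd_size - 1];
		if (wnd_sum < lso_mss)
			return 1;
		wnd_sum -= frag_size[i];
	}
	return 0;
}
#endif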

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
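
	/* Editor's note: an illustrative BD chain for a TSO packet, per the
	 * comment above (hypothetical sizes):
	 *
	 *   start BD  - headers, marked START, carries nbd and hdr-nbd bits
	 *   parse BD  - pbd: ip_hlen, total_hlen, lso_mss, pseudo csum
	 *   data BD   - remainder of the linear data (from bnx2x_tx_split())
	 *   data BDs  - one per frag, the final one marked LAST
	 */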

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets always have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
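
/* Editor's note: a minimal sketch (#if 0) of the E1H multicast hash used
 * above: bits 24..31 of the little-endian CRC32C of the MAC address select
 * one of 256 filter bits, stored as eight 32-bit MC_HASH registers.
 */
#if 0
static void mc_hash_example(const u8 *mac)
{
	u32 mc_filter[MC_HASH_SIZE] = {0};
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;		  /* 0..255 */

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f); /* word index, bit index */
}
#endif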

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
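
/* Editor's note: illustrative only (#if 0).  bnx2x_init_one() feeds these
 * two helpers into its probe banner; e.g. an x8 link trained at Gen2 would
 * print "PCI-E x8 5GHz (Gen2)".
 */
#if 0
static void pcie_report_example(struct bnx2x *bp)
{
	int width = bnx2x_get_pcie_width(bp);		/* e.g. 8 */
	const char *speed = (bnx2x_get_pcie_speed(bp) == 2) ?
			    "5GHz (Gen2)" : "2.5GHz";
}
#endif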

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
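
/* Editor's note: for reference, the PCI error-recovery core invokes these
 * callbacks in order: ->error_detected() (where we request a slot reset),
 * then ->slot_reset() after the bus has been reset, and finally ->resume()
 * once traffic may flow again.
 */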

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);