/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

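/* DMA len32 dwords from the (DMA-mapped) host buffer at dma_addr to GRC
 * address dst_addr, polling a slowpath completion word; falls back to
 * indirect register writes while DMAE is not yet ready
 */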
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

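/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; falls back to indirect register reads while DMAE is not ready
 */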
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

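/* scan the per-storm assert lists and print any entries found;
 * returns the number of asserts seen
 */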
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

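/* print the firmware trace from the MCP scratch-pad buffer, 8 dwords at a
 * time, wrapping around the current mark
 */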
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

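/* dump driver state on a fatal error: status block indices, per-queue
 * Rx/Tx state and the rings themselves, then the FW trace and storm asserts
 */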
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

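/* configure the HC to generate interrupts according to the current mode
 * (MSI-X, MSI or INTx) and set the leading/trailing edge masks on E1H
 */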
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

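/* refresh the driver's copy of the fastpath status block indices;
 * returns a bitmask of which indices changed (1 - CSTORM, 2 - USTORM)
 */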
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

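/* number of Tx BDs still available, with the "next-page" entries of the
 * ring counted out of the usable space
 */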
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

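/* service up to @work Tx completions and wake the Tx queue if it was
 * stopped and enough BDs have been freed
 */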
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

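/* handle a ramrod completion CQE received on the RCQ and advance the
 * corresponding fastpath/driver state machine
 */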
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

1145
1146static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1147 struct eth_fast_path_rx_cqe *fp_cqe)
1148{
1149 struct bnx2x *bp = fp->bp;
4f40f2cb 1150 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1151 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1152 SGE_PAGE_SHIFT;
7a9b2557
VZ
1153 u16 last_max, last_elem, first_elem;
1154 u16 delta = 0;
1155 u16 i;
1156
1157 if (!sge_len)
1158 return;
1159
1160 /* First mark all used pages */
1161 for (i = 0; i < sge_len; i++)
1162 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1163
1164 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1165 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1166
1167 /* Here we assume that the last SGE index is the biggest */
1168 prefetch((void *)(fp->sge_mask));
1169 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1170
1171 last_max = RX_SGE(fp->last_max_sge);
1172 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1173 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1174
1175 /* If ring is not full */
1176 if (last_elem + 1 != first_elem)
1177 last_elem++;
1178
1179 /* Now update the prod */
1180 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1181 if (likely(fp->sge_mask[i]))
1182 break;
1183
1184 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1185 delta += RX_SGE_MASK_ELEM_SZ;
1186 }
1187
1188 if (delta > 0) {
1189 fp->rx_sge_prod += delta;
1190 /* clear page-end entries */
1191 bnx2x_clear_sge_mask_next_elems(fp);
1192 }
1193
1194 DP(NETIF_MSG_RX_STATUS,
1195 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1196 fp->last_max_sge, fp->rx_sge_prod);
1197}
1198
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

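/* start a new TPA (HW LRO) aggregation: park the just-received skb in the
 * per-queue bin and put the pool's spare skb on the BD ring in its place
 */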
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

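/* main Rx work loop: walk the completion queue, hand slowpath CQEs to
 * bnx2x_sp_event(), handle TPA start/stop events and pass received skbs up
 * the stack; returns the number of packets processed (at most @budget)
 */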
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

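/* HW lock register protocol (as used above and below): writing the
 * resource bit to (hw_lock_control_reg + 4) attempts to take the lock,
 * reading hw_lock_control_reg back shows which resources are held, and
 * writing the bit to hw_lock_control_reg itself releases it.
 */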
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

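/* For the GPIO helpers below: if the two-port swap strap is set and the
 * strap override is active, the GPIO bank really belongs to the other
 * port, so the effective port is (swap && override) XOR port, and the
 * pin is then shifted by MISC_REGISTERS_GPIO_PORT_SHIFT for port 1.
 */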
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

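/* Worked example, assuming RS_PERIODIC_TIMEOUT_USEC is the 100 usec
 * quoted in the comment above: at 10G, line_speed = 10000 Mbps gives
 * r_param = 1250 bytes/usec, rs_periodic_timeout = 100/4 = 25 ticks,
 * rs_threshold = (100 * 1250 * 5)/4 = 156250 bytes and, per the comment
 * above, t_fair = 1000 usec.
 */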
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

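/* Units in the function above: the MCP reports min/max bandwidth in
 * units of 100 Mbps, hence the "* 100" scaling into Mbps.  Since Mbps
 * equals bits/usec, rate * RS_PERIODIC_TIMEOUT_USEC / 8 yields the byte
 * quota a vn may transmit per rate-shaping period.
 */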
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

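/* The "ALR" handled below is the split MCP access lock - bit 31 of the
 * GRC register at GRCBASE_MCP + 0x9c.  The acquire loop retries up to
 * 1000 times with a 5ms sleep, i.e. roughly 5 seconds, before giving up
 * with -EBUSY.
 */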
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

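/* bnx2x_update_dsb_idx() returns a bitmask of which default status block
 * indices moved since the last ack: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 */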
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

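/* A bit is newly asserted when the chip reports it set but it is not yet
 * acked or tracked (attn_bits & ~attn_ack & ~attn_state); it is newly
 * deasserted when the chip has cleared it while the ack and our state
 * still show it (~attn_bits & attn_ack & attn_state).  Any combination
 * where attn_bits and attn_ack agree but differ from our state is
 * inconsistent, hence the "BAD attention state" check below.
 */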
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

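/* Example: with s = 0x00000001:ffffffff and a = 0x00000000:00000001,
 * s_lo wraps to 0 (which is < a_lo, so a carry of 1 is added) and the
 * result is 0x00000002:00000000.  DIFF_64 below is the mirror operation
 * and borrows from the high word instead; it saturates to 0:0 when the
 * subtrahend is larger than the minuend.
 */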
/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

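/* Note: on 32-bit kernels bnx2x_hilo() deliberately returns only the low
 * 32 bits of the [hi:lo] pair, since a long cannot hold the full value.
 */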
/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

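/* A rough sketch of the loader mechanism used below: when several DMAE
 * commands were queued in the executer (executer_idx > 0), one extra
 * "loader" command is posted that DMAEs the queued commands into the
 * DMAE command memory; each command's completion write to a
 * dmae_reg_go_c register kicks off the next one, so the whole chain can
 * run without per-command CPU involvement.
 */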
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

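/* The port statistics read above is split in two because a single DMAE
 * read transfer is capped at DMAE_LEN32_RD_MAX dwords; the first command
 * chains to the second through the loader GO register, and only the
 * second one writes the DMAE_COMP_VAL completion that bnx2x_stats_comp()
 * polls for.
 */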
3271static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3272{
3273 struct dmae_command *dmae;
34f80b04 3274 int port = BP_PORT(bp);
bb2a0f7a 3275 int vn = BP_E1HVN(bp);
a2fbb9ea 3276 u32 opcode;
bb2a0f7a 3277 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3278 u32 mac_addr;
bb2a0f7a
YG
3279 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3280
3281 /* sanity */
3282 if (!bp->link_vars.link_up || !bp->port.pmf) {
3283 BNX2X_ERR("BUG!\n");
3284 return;
3285 }
a2fbb9ea
ET
3286
3287 bp->executer_idx = 0;
bb2a0f7a
YG
3288
3289 /* MCP */
3290 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3291 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3292 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3293#ifdef __BIG_ENDIAN
bb2a0f7a 3294 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3295#else
bb2a0f7a 3296 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3297#endif
bb2a0f7a
YG
3298 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3299 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3300
bb2a0f7a 3301 if (bp->port.port_stx) {
a2fbb9ea
ET
3302
3303 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3304 dmae->opcode = opcode;
bb2a0f7a
YG
3305 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3306 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3307 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3308 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3309 dmae->len = sizeof(struct host_port_stats) >> 2;
3310 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3311 dmae->comp_addr_hi = 0;
3312 dmae->comp_val = 1;
a2fbb9ea
ET
3313 }
3314
bb2a0f7a
YG
3315 if (bp->func_stx) {
3316
3317 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3318 dmae->opcode = opcode;
3319 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3320 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3321 dmae->dst_addr_lo = bp->func_stx >> 2;
3322 dmae->dst_addr_hi = 0;
3323 dmae->len = sizeof(struct host_func_stats) >> 2;
3324 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3325 dmae->comp_addr_hi = 0;
3326 dmae->comp_val = 1;
a2fbb9ea
ET
3327 }
3328
bb2a0f7a 3329 /* MAC */
a2fbb9ea
ET
3330 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3331 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3332 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3333#ifdef __BIG_ENDIAN
3334 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3335#else
3336 DMAE_CMD_ENDIANITY_DW_SWAP |
3337#endif
bb2a0f7a
YG
3338 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3339 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3340
c18487ee 3341 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3342
3343 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3344 NIG_REG_INGRESS_BMAC0_MEM);
3345
3346 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3347 BIGMAC_REGISTER_TX_STAT_GTBYT */
3348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349 dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

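/* Note on the DMAE chain built above: every descriptor except the last
 * completes into the loader "go" register (dmae_reg_go_c[loader_idx]),
 * which kicks off the next command in the chain.  Only the final
 * descriptor completes into the stats_comp word in host memory with
 * DMAE_COMP_VAL, so polling stats_comp tells us the whole chain is done.
 */
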
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

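/* Unlike the port statistics chain above, the function statistics need
 * only a single DMAE command: one PCI -> GRC copy of host_func_stats to
 * the per-function location in shared memory (func_stx).  The completion
 * again lands in stats_comp so bnx2x_stats_comp() can poll for
 * DMAE_COMP_VAL.
 */
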
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

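/* Only the PMF (the function managing the port) programs the full port
 * statistics DMAE chain; a non-PMF function with a valid func_stx
 * collects just its own per-function block.  The pmf_start/restart
 * variants first wait for any in-flight DMAE completion
 * (bnx2x_stats_comp) before re-arming the collection.
 */
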
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

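/* The UPDATE_STAT64 macros above extend the BMAC's wrapping hardware
 * counters into monotonic 64-bit values kept as hi/lo u32 pairs: the
 * delta between the freshly DMAE'd reading and the previous one is
 * computed (via the local "diff" struct) and added into the matching
 * mac_stx entry, so the combined value is ((u64)hi << 32) + lo.
 */
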
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

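/* Return convention for the update helpers: 0 means the snapshot was
 * consumed, any non-zero value means "not updated" - the callers
 * (bnx2x_stats_update/bnx2x_stats_stop) only test against zero.
 */
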
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

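/* Validity protocol for the storm snapshots: the host advances
 * bp->stats_counter with every statistics query it posts, and each storm
 * stamps the snapshot it writes back with the counter it served.  A
 * snapshot is therefore current only when stamped_counter + 1 equals
 * bp->stats_counter; anything else means the firmware has not finished
 * this round yet and the whole update is retried on the next tick.
 */
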
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats not updated for 3 consecutive ticks\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

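/* Flow of a single update tick: nothing happens until the DMAE chain
 * from the previous post has completed (*stats_comp == DMAE_COMP_VAL).
 * If the storm snapshots lag behind for four consecutive ticks
 * (stats_pending reaching 3 after the increment), the driver assumes
 * the firmware is stuck and panics; otherwise it folds the fresh numbers
 * into the netdev/driver stats and immediately posts the next round.
 */
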
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

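/* Every statistics request goes through this two-state machine rather
 * than calling the helpers directly; e.g. link-up handling does
 * bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP), which in the ENABLED
 * state restarts collection and stays ENABLED, while in the DISABLED
 * state it starts collection and moves to ENABLED.
 */
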
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

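/* Heartbeat example: the driver writes sequence N to drv_pulse_mb and
 * the management CPU (MCP) echoes it into mcp_pulse_mb.  With sequences
 * masked to DRV_PULSE_SEQ_MASK, seeing mcp_pulse == N (already echoed)
 * or mcp_pulse == N - 1 (echo still pending) is healthy; any larger gap,
 * wraparound included, triggers the "lost a heartbeat" error above.
 */
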
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

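/* A fastpath status block has one section per storm that can signal the
 * host (USTORM for Rx, CSTORM for Tx here).  Init tells each storm where
 * the host copy lives (the SB_ADDR writes) and which function owns it,
 * then writes 1 into every HC_DISABLE word so no index generates
 * interrupts until bnx2x_update_coalesce() selectively re-enables the
 * ones actually in use.
 */
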
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

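/* rx_ticks/tx_ticks are kept in microseconds; the /12 converts them into
 * the units the host-coalescing timeout register expects (one unit per
 * 12 usec, per the scaling used throughout this driver).  A ticks value
 * of 0 disables timeout-based coalescing for that index by writing 1
 * into the corresponding HC_DISABLE word.
 */
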
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

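/* A TPA bin is only DMA-mapped while an aggregation is in flight
 * (BNX2X_TPA_START); a bin in the STOP state holds a spare skb that was
 * never handed to the chip, so for it only the skb itself is freed.
 */
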
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set(&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

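/* Ring geometry note: each ring is built from BCM_PAGE_SIZE pages whose
 * final entries (two descriptor-sized slots in the BD and SGE rings, one
 * CQE-sized slot in the completion ring) are reserved as "next page"
 * pointers - that is what the initialization loops above fill in.  The
 * NEXT_RX_IDX/NEXT_SGE_IDX/NEXT_RCQ_IDX helpers skip those reserved
 * slots when a producer crosses a page boundary.
 */
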
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

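/* The RSS indirection table simply round-robins the
 * TSTORM_INDIRECTION_TABLE_SIZE hash buckets over the Rx queues: bucket
 * i maps to client (base cl_id + i % num_rx_queues).  E.g. with 4 Rx
 * queues and a base client id of 0, buckets 0,4,8,... land on queue 0,
 * buckets 1,5,9,... on queue 1, and so on.
 */
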
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

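/* Worked example: with min-BW configs of 30 and 70 for the two visible
 * VNs of a port, the loop accumulates vn_weight_sum = (30 + 70) * 100 =
 * 10000, and each VN's fair share is later derived from
 * vn_min_rate / vn_weight_sum.  A VN configured with 0 still gets the
 * DEF_MIN_RATE floor so it is never starved - unless every VN is zero,
 * in which case fairness is switched off entirely.
 */
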
471de716 4906static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4907{
a2fbb9ea
ET
4908 struct tstorm_eth_function_common_config tstorm_config = {0};
4909 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4910 int port = BP_PORT(bp);
4911 int func = BP_FUNC(bp);
de832a55
EG
4912 int i, j;
4913 u32 offset;
471de716 4914 u16 max_agg_size;
a2fbb9ea
ET
4915
4916 if (is_multi(bp)) {
555f6c78 4917 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4918 tstorm_config.rss_result_mask = MULTI_MASK;
4919 }
8d9c5f34
EG
4920 if (IS_E1HMF(bp))
4921 tstorm_config.config_flags |=
4922 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4923
34f80b04
EG
4924 tstorm_config.leading_client_id = BP_L_ID(bp);
4925
a2fbb9ea 4926 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4927 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4928 (*(u32 *)&tstorm_config));
4929
c14423fe 4930 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4931 bnx2x_set_storm_rx_mode(bp);
4932
de832a55
EG
4933 for_each_queue(bp, i) {
4934 u8 cl_id = bp->fp[i].cl_id;
4935
4936 /* reset xstorm per client statistics */
4937 offset = BAR_XSTRORM_INTMEM +
4938 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4939 for (j = 0;
4940 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4941 REG_WR(bp, offset + j*4, 0);
4942
4943 /* reset tstorm per client statistics */
4944 offset = BAR_TSTRORM_INTMEM +
4945 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946 for (j = 0;
4947 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4949
4950 /* reset ustorm per client statistics */
4951 offset = BAR_USTRORM_INTMEM +
4952 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 for (j = 0;
4954 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4956 }
4957
4958 /* Init statistics related context */
34f80b04 4959 stats_flags.collect_eth = 1;
a2fbb9ea 4960
66e855f3 4961 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4962 ((u32 *)&stats_flags)[0]);
66e855f3 4963 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4964 ((u32 *)&stats_flags)[1]);
4965
66e855f3 4966 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4967 ((u32 *)&stats_flags)[0]);
66e855f3 4968 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4969 ((u32 *)&stats_flags)[1]);
4970
4971 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4972 ((u32 *)&stats_flags)[0]);
4973 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4974 ((u32 *)&stats_flags)[1]);
4975
66e855f3 4976 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4977 ((u32 *)&stats_flags)[0]);
66e855f3 4978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4979 ((u32 *)&stats_flags)[1]);
4980
4981 REG_WR(bp, BAR_XSTRORM_INTMEM +
4982 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4983 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4984 REG_WR(bp, BAR_XSTRORM_INTMEM +
4985 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4986 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4987
4988 REG_WR(bp, BAR_TSTRORM_INTMEM +
4989 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_TSTRORM_INTMEM +
4992 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4994
4995 REG_WR(bp, BAR_USTRORM_INTMEM +
4996 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_USTRORM_INTMEM +
4999 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002 if (CHIP_IS_E1H(bp)) {
5003 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5004 IS_E1HMF(bp));
5005 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5006 IS_E1HMF(bp));
5007 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5008 IS_E1HMF(bp));
5009 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5010 IS_E1HMF(bp));
5011
5012 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5013 bp->e1hov);
5014 }
5015
5016 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5017 max_agg_size =
5018 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5019 SGE_PAGE_SIZE * PAGES_PER_SGE),
5020 (u32)0xffff);
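	/* Worked example (page size is an assumption, not taken from this
	 * source): with 4K SGE pages and one page per SGE the product is
	 * 8 * 4096 = 32768, which fits under the clamp; larger
	 * SGE_PAGE_SIZE * PAGES_PER_SGE products are capped at 0xffff so
	 * the value still fits the u16 register. */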
555f6c78 5021 for_each_rx_queue(bp, i) {
7a9b2557 5022 struct bnx2x_fastpath *fp = &bp->fp[i];
5023
5024 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5025 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5026 U64_LO(fp->rx_comp_mapping));
5027 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5028 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5029 U64_HI(fp->rx_comp_mapping));
5030
7a9b2557 5031 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5032 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5033 max_agg_size);
5034 }
8a1c38d1 5035
5036 /* dropless flow control */
5037 if (CHIP_IS_E1H(bp)) {
5038 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5039
5040 rx_pause.bd_thr_low = 250;
5041 rx_pause.cqe_thr_low = 250;
5042 rx_pause.cos = 1;
5043 rx_pause.sge_thr_low = 0;
5044 rx_pause.bd_thr_high = 350;
5045 rx_pause.cqe_thr_high = 350;
5046 rx_pause.sge_thr_high = 0;
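		/* The low/high pairs look like hysteresis watermarks on ring
		 * occupancy for asserting and releasing pause (an assumption
		 * based on the field names; the firmware interface is not
		 * documented here).  The SGE thresholds stay 0 unless TPA is
		 * enabled for the queue below. */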
5047
5048 for_each_rx_queue(bp, i) {
5049 struct bnx2x_fastpath *fp = &bp->fp[i];
5050
5051 if (!fp->disable_tpa) {
5052 rx_pause.sge_thr_low = 150;
5053 rx_pause.sge_thr_high = 250;
5054 }
5055
5056
5057 offset = BAR_USTRORM_INTMEM +
5058 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5059 fp->cl_id);
5060 for (j = 0;
5061 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5062 j++)
5063 REG_WR(bp, offset + j*4,
5064 ((u32 *)&rx_pause)[j]);
5065 }
5066 }
5067
5068 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5069
5070 /* Init rate shaping and fairness contexts */
5071 if (IS_E1HMF(bp)) {
5072 int vn;
5073
5074 /* During init there is no active link;
5075 until link is up, set link rate to 10Gbps */
5076 bp->link_vars.line_speed = SPEED_10000;
5077 bnx2x_init_port_minmax(bp);
5078
5079 bnx2x_calc_vn_weight_sum(bp);
5080
5081 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5082 bnx2x_init_vn_minmax(bp, 2*vn + port);
5083
5084 /* Enable rate shaping and fairness */
5085 bp->cmng.flags.cmng_enables =
5086 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5087 if (bp->vn_weight_sum)
5088 bp->cmng.flags.cmng_enables |=
5089 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5090 else
5091 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5092 " fairness will be disabled\n");
5093 } else {
5094 /* rate shaping and fairness are disabled */
5095 DP(NETIF_MSG_IFUP,
5096 "single function mode minmax will be disabled\n");
5097 }
5098
5099
5100 /* Store it to internal memory */
5101 if (bp->port.pmf)
5102 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5103 REG_WR(bp, BAR_XSTRORM_INTMEM +
5104 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5105 ((u32 *)(&bp->cmng))[i]);
5106}
5107
5108static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5109{
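	/* Deliberate fall-through: a COMMON load also performs the port
	 * and function init, a PORT load also performs the function init,
	 * and a FUNCTION load performs the function init only. */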
5110 switch (load_code) {
5111 case FW_MSG_CODE_DRV_LOAD_COMMON:
5112 bnx2x_init_internal_common(bp);
5113 /* no break */
5114
5115 case FW_MSG_CODE_DRV_LOAD_PORT:
5116 bnx2x_init_internal_port(bp);
5117 /* no break */
5118
5119 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5120 bnx2x_init_internal_func(bp);
5121 break;
5122
5123 default:
5124 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5125 break;
5126 }
5127}
5128
5129static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5130{
5131 int i;
5132
5133 for_each_queue(bp, i) {
5134 struct bnx2x_fastpath *fp = &bp->fp[i];
5135
34f80b04 5136 fp->bp = bp;
a2fbb9ea 5137 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5138 fp->index = i;
5139 fp->cl_id = BP_L_ID(bp) + i;
5140 fp->sb_id = fp->cl_id;
5141 DP(NETIF_MSG_IFUP,
5142 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5143 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5144 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5145 fp->sb_id);
5c862848 5146 bnx2x_update_fpsb_idx(fp);
5147 }
5148
5149 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5150 DEF_SB_ID);
5151 bnx2x_update_dsb_idx(bp);
5152 bnx2x_update_coalesce(bp);
5153 bnx2x_init_rx_rings(bp);
5154 bnx2x_init_tx_ring(bp);
5155 bnx2x_init_sp_ring(bp);
5156 bnx2x_init_context(bp);
471de716 5157 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5158 bnx2x_init_ind_table(bp);
5159 bnx2x_stats_init(bp);
5160
5161 /* At this point, we are ready for interrupts */
5162 atomic_set(&bp->intr_sem, 0);
5163
5164 /* flush all before enabling interrupts */
5165 mb();
5166 mmiowb();
5167
615f8fd9 5168 bnx2x_int_enable(bp);
5169}
5170
5171/* end of nic init */
5172
5173/*
5174 * gzip service functions
5175 */
5176
5177static int bnx2x_gunzip_init(struct bnx2x *bp)
5178{
5179 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5180 &bp->gunzip_mapping);
5181 if (bp->gunzip_buf == NULL)
5182 goto gunzip_nomem1;
5183
5184 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5185 if (bp->strm == NULL)
5186 goto gunzip_nomem2;
5187
5188 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5189 GFP_KERNEL);
5190 if (bp->strm->workspace == NULL)
5191 goto gunzip_nomem3;
5192
5193 return 0;
5194
5195gunzip_nomem3:
5196 kfree(bp->strm);
5197 bp->strm = NULL;
5198
5199gunzip_nomem2:
5200 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5201 bp->gunzip_mapping);
5202 bp->gunzip_buf = NULL;
5203
5204gunzip_nomem1:
5205 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5206 " decompression\n", bp->dev->name);
5207 return -ENOMEM;
5208}
5209
5210static void bnx2x_gunzip_end(struct bnx2x *bp)
5211{
5212 kfree(bp->strm->workspace);
5213
5214 kfree(bp->strm);
5215 bp->strm = NULL;
5216
5217 if (bp->gunzip_buf) {
5218 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219 bp->gunzip_mapping);
5220 bp->gunzip_buf = NULL;
5221 }
5222}
5223
5224static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5225{
5226 int n, rc;
5227
5228 /* check gzip header */
5229 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5230 return -EINVAL;
5231
5232 n = 10;
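	/* Per RFC 1952 the fixed gzip header is 10 bytes: magic (0x1f
	 * 0x8b), compression method, flags, mtime, XFL and OS.  If the
	 * FNAME flag is set, a NUL-terminated original file name follows
	 * and is skipped below before the raw deflate data is handed to
	 * zlib. */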
5233
34f80b04 5234#define FNAME 0x8
5235
5236 if (zbuf[3] & FNAME)
5237 while ((zbuf[n++] != 0) && (n < len));
5238
5239 bp->strm->next_in = zbuf + n;
5240 bp->strm->avail_in = len - n;
5241 bp->strm->next_out = bp->gunzip_buf;
5242 bp->strm->avail_out = FW_BUF_SIZE;
5243
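	/* Negative windowBits tells zlib to expect a raw deflate stream
	 * without a zlib/gzip wrapper, since the gzip header was already
	 * stripped by hand above. */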
5244 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5245 if (rc != Z_OK)
5246 return rc;
5247
5248 rc = zlib_inflate(bp->strm, Z_FINISH);
5249 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5250 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5251 bp->dev->name, bp->strm->msg);
5252
5253 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5254 if (bp->gunzip_outlen & 0x3)
5255 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5256 " gunzip_outlen (%d) not aligned\n",
5257 bp->dev->name, bp->gunzip_outlen);
5258 bp->gunzip_outlen >>= 2;
5259
5260 zlib_inflateEnd(bp->strm);
5261
5262 if (rc == Z_STREAM_END)
5263 return 0;
5264
5265 return rc;
5266}
5267
5268/* nic load/unload */
5269
5270/*
34f80b04 5271 * General service functions
5272 */
5273
5274/* send a NIG loopback debug packet */
5275static void bnx2x_lb_pckt(struct bnx2x *bp)
5276{
a2fbb9ea 5277 u32 wb_write[3];
5278
5279 /* Ethernet source and destination addresses */
5280 wb_write[0] = 0x55555555;
5281 wb_write[1] = 0x55555555;
34f80b04 5282 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5283 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5284
5285 /* NON-IP protocol */
5286 wb_write[0] = 0x09000000;
5287 wb_write[1] = 0x55555555;
34f80b04 5288 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5289 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5290}
5291
5292/* some of the internal memories
5293 * are not directly readable from the driver;
5294 * to test them we send debug packets
5295 */
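/* In outline, the test below disables the parser's neighbour blocks,
 * zeroes the parser's CFC search credit, injects packets through the
 * NIG debug loopback and verifies that the NIG and PRS packet counters
 * advance exactly as expected at each step. */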
5296static int bnx2x_int_mem_test(struct bnx2x *bp)
5297{
5298 int factor;
5299 int count, i;
5300 u32 val = 0;
5301
ad8d3948 5302 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5303 factor = 120;
5304 else if (CHIP_REV_IS_EMUL(bp))
5305 factor = 200;
5306 else
a2fbb9ea 5307 factor = 1;
5308
5309 DP(NETIF_MSG_HW, "start part1\n");
5310
5311 /* Disable inputs of parser neighbor blocks */
5312 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5313 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5314 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5315 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5316
5317 /* Write 0 to parser credits for CFC search request */
5318 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5319
5320 /* send Ethernet packet */
5321 bnx2x_lb_pckt(bp);
5322
5323 /* TODO: do I reset the NIG statistic? */
5324 /* Wait until NIG register shows 1 packet of size 0x10 */
5325 count = 1000 * factor;
5326 while (count) {
34f80b04 5327
5328 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5329 val = *bnx2x_sp(bp, wb_data[0]);
5330 if (val == 0x10)
5331 break;
5332
5333 msleep(10);
5334 count--;
5335 }
5336 if (val != 0x10) {
5337 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5338 return -1;
5339 }
5340
5341 /* Wait until PRS register shows 1 packet */
5342 count = 1000 * factor;
5343 while (count) {
5344 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5345 if (val == 1)
5346 break;
5347
5348 msleep(10);
5349 count--;
5350 }
5351 if (val != 0x1) {
5352 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5353 return -2;
5354 }
5355
5356 /* Reset and init BRB, PRS */
34f80b04 5357 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5358 msleep(50);
34f80b04 5359 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5360 msleep(50);
5361 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5362 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5363
5364 DP(NETIF_MSG_HW, "part2\n");
5365
5366 /* Disable inputs of parser neighbor blocks */
5367 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5368 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5369 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5370 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5371
5372 /* Write 0 to parser credits for CFC search request */
5373 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5374
5375 /* send 10 Ethernet packets */
5376 for (i = 0; i < 10; i++)
5377 bnx2x_lb_pckt(bp);
5378
5379 /* Wait until NIG register shows 10 + 1
5380 packets of size 11*0x10 = 0xb0 */
5381 count = 1000 * factor;
5382 while (count) {
34f80b04 5383
5384 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5385 val = *bnx2x_sp(bp, wb_data[0]);
5386 if (val == 0xb0)
5387 break;
5388
5389 msleep(10);
5390 count--;
5391 }
5392 if (val != 0xb0) {
5393 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5394 return -3;
5395 }
5396
5397 /* Wait until PRS register shows 2 packets */
5398 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5399 if (val != 2)
5400 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5401
5402 /* Write 1 to parser credits for CFC search request */
5403 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5404
5405 /* Wait until PRS register shows 3 packets */
5406 msleep(10 * factor);
5407 /* Wait until NIG register shows 1 packet of size 0x10 */
5408 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5409 if (val != 3)
5410 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5411
5412 /* clear NIG EOP FIFO */
5413 for (i = 0; i < 11; i++)
5414 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5415 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5416 if (val != 1) {
5417 BNX2X_ERR("clear of NIG failed\n");
5418 return -4;
5419 }
5420
5421 /* Reset and init BRB, PRS, NIG */
5422 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5423 msleep(50);
5424 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5425 msleep(50);
5426 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5427 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5428#ifndef BCM_ISCSI
5429 /* set NIC mode */
5430 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5431#endif
5432
5433 /* Enable inputs of parser neighbor blocks */
5434 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5435 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5436 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5437 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5438
5439 DP(NETIF_MSG_HW, "done\n");
5440
5441 return 0; /* OK */
5442}
5443
5444static void enable_blocks_attention(struct bnx2x *bp)
5445{
5446 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5447 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5448 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5449 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5450 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5451 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5452 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5453 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5454 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5455/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5456/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5457 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5458 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5459 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5460/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5461/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5462 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5463 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5464 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5465 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5466/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5467/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5468 if (CHIP_REV_IS_FPGA(bp))
5469 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5470 else
5471 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5472 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5473 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5474 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5475/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5476/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5477 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5478 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5479/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5480 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5481}
5482
34f80b04 5483
5484static void bnx2x_reset_common(struct bnx2x *bp)
5485{
5486 /* reset_common */
5487 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5488 0xd3ffff7f);
5489 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5490}
5491
34f80b04 5492static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5493{
a2fbb9ea 5494 u32 val, i;
a2fbb9ea 5495
34f80b04 5496 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5497
81f75bbf 5498 bnx2x_reset_common(bp);
5499 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5500 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5501
5502 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5503 if (CHIP_IS_E1H(bp))
5504 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5505
5506 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5507 msleep(30);
5508 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5509
5510 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5511 if (CHIP_IS_E1(bp)) {
5512 /* enable HW interrupt from PXP on USDM overflow
5513 bit 16 on INT_MASK_0 */
5514 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5515 }
a2fbb9ea 5516
5517 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5518 bnx2x_init_pxp(bp);
5519
5520#ifdef __BIG_ENDIAN
5521 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5522 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5523 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5524 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5525 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5526 /* make sure this value is 0 */
5527 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5528
5529/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5530 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5531 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5532 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5533 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5534#endif
5535
34f80b04 5536 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5537#ifdef BCM_ISCSI
5538 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5539 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5540 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5541#endif
5542
5543 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5544 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5545
5546 /* let the HW do its magic ... */
5547 msleep(100);
5548 /* finish PXP init */
5549 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5550 if (val != 1) {
5551 BNX2X_ERR("PXP2 CFG failed\n");
5552 return -EBUSY;
5553 }
5554 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5555 if (val != 1) {
5556 BNX2X_ERR("PXP2 RD_INIT failed\n");
5557 return -EBUSY;
5558 }
a2fbb9ea 5559
5560 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5561 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5562
34f80b04 5563 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5564
5565 /* clean the DMAE memory */
5566 bp->dmae_ready = 1;
5567 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5568
5569 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5570 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5571 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5572 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5573
5574 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5575 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5576 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5577 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5578
5579 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5580 /* soft reset pulse */
5581 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5582 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5583
5584#ifdef BCM_ISCSI
34f80b04 5585 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5586#endif
a2fbb9ea 5587
5588 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5589 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5590 if (!CHIP_REV_IS_SLOW(bp)) {
5591 /* enable hw interrupt from doorbell Q */
5592 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5593 }
a2fbb9ea 5594
34f80b04 5595 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5596 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5597 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5598 /* set NIC mode */
5599 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5600 if (CHIP_IS_E1H(bp))
5601 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5602
5603 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5604 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5605 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5606 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5607
5608 if (CHIP_IS_E1H(bp)) {
5609 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5610 STORM_INTMEM_SIZE_E1H/2);
5611 bnx2x_init_fill(bp,
5612 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5613 0, STORM_INTMEM_SIZE_E1H/2);
5614 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5615 STORM_INTMEM_SIZE_E1H/2);
5616 bnx2x_init_fill(bp,
5617 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5618 0, STORM_INTMEM_SIZE_E1H/2);
5619 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5620 STORM_INTMEM_SIZE_E1H/2);
5621 bnx2x_init_fill(bp,
5622 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5623 0, STORM_INTMEM_SIZE_E1H/2);
5624 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5625 STORM_INTMEM_SIZE_E1H/2);
5626 bnx2x_init_fill(bp,
5627 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5628 0, STORM_INTMEM_SIZE_E1H/2);
5629 } else { /* E1 */
5630 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5631 STORM_INTMEM_SIZE_E1);
5632 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5633 STORM_INTMEM_SIZE_E1);
5634 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5635 STORM_INTMEM_SIZE_E1);
5636 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5637 STORM_INTMEM_SIZE_E1);
34f80b04 5638 }
a2fbb9ea 5639
5640 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5641 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5642 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5643 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5644
5645 /* sync semi rtc */
5646 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5647 0x80000000);
5648 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5649 0x80000000);
a2fbb9ea 5650
5651 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5652 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5653 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5654
5655 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5656 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5657 REG_WR(bp, i, 0xc0cac01a);
5658 /* TODO: replace with something meaningful */
5659 }
8d9c5f34 5660 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5661 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5662
5663 if (sizeof(union cdu_context) != 1024)
5664 /* we currently assume that a context is 1024 bytes */
5665 printk(KERN_ALERT PFX "please adjust the size of"
5666 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5667
5668 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5669 val = (4 << 24) + (0 << 12) + 1024;
5670 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5671 if (CHIP_IS_E1(bp)) {
5672 /* !!! fix pxp client credit until excel update */
5673 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5674 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5675 }
a2fbb9ea 5676
5677 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5678 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5679 /* enable context validation interrupt from CFC */
5680 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5681
5682 /* set the thresholds to prevent CFC/CDU race */
5683 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5684
5685 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5686 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5687
5688 /* PXPCS COMMON comes here */
5689 /* Reset PCIE errors for debug */
5690 REG_WR(bp, 0x2814, 0xffffffff);
5691 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5692
5693 /* EMAC0 COMMON comes here */
5694 /* EMAC1 COMMON comes here */
5695 /* DBU COMMON comes here */
5696 /* DBG COMMON comes here */
5697
5698 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5699 if (CHIP_IS_E1H(bp)) {
5700 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5701 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5702 }
5703
5704 if (CHIP_REV_IS_SLOW(bp))
5705 msleep(200);
5706
5707 /* finish CFC init */
5708 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5709 if (val != 1) {
5710 BNX2X_ERR("CFC LL_INIT failed\n");
5711 return -EBUSY;
5712 }
5713 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5714 if (val != 1) {
5715 BNX2X_ERR("CFC AC_INIT failed\n");
5716 return -EBUSY;
5717 }
5718 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5719 if (val != 1) {
5720 BNX2X_ERR("CFC CAM_INIT failed\n");
5721 return -EBUSY;
5722 }
5723 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5724
5725 /* read NIG statistic
5726 to see if this is our first up since powerup */
5727 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5728 val = *bnx2x_sp(bp, wb_data[0]);
5729
5730 /* do internal memory self test */
5731 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5732 BNX2X_ERR("internal mem self test failed\n");
5733 return -EBUSY;
5734 }
5735
35b19ba5 5736 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5740 bp->port.need_hw_lock = 1;
5741 break;
5742
35b19ba5 5743 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5744 /* Fan failure is indicated by SPIO 5 */
5745 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5746 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5747
5748 /* set to active low mode */
5749 val = REG_RD(bp, MISC_REG_SPIO_INT);
5750 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5751 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5752 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5753
5754 /* enable interrupt to signal the IGU */
5755 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5756 val |= (1 << MISC_REGISTERS_SPIO_5);
5757 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5758 break;
f1410647 5759
5760 default:
5761 break;
5762 }
f1410647 5763
5764 /* clear PXP2 attentions */
5765 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5766
34f80b04 5767 enable_blocks_attention(bp);
a2fbb9ea 5768
5769 if (!BP_NOMCP(bp)) {
5770 bnx2x_acquire_phy_lock(bp);
5771 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5772 bnx2x_release_phy_lock(bp);
5773 } else
5774 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5775
5776 return 0;
5777}
a2fbb9ea 5778
5779static int bnx2x_init_port(struct bnx2x *bp)
5780{
5781 int port = BP_PORT(bp);
1c06328c 5782 u32 low, high;
34f80b04 5783 u32 val;
a2fbb9ea 5784
5785 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5786
5787 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5788
5789 /* Port PXP comes here */
5790 /* Port PXP2 comes here */
5791#ifdef BCM_ISCSI
5792 /* Port0 1
5793 * Port1 385 */
5794 i++;
5795 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5796 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5797 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5798 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5799
5800 /* Port0 2
5801 * Port1 386 */
5802 i++;
5803 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5804 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5805 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5806 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5807
5808 /* Port0 3
5809 * Port1 387 */
5810 i++;
5811 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5812 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5813 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5814 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5815#endif
34f80b04 5816 /* Port CMs come here */
5817 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5818 (port ? XCM_PORT1_END : XCM_PORT0_END));
5819
5820 /* Port QM comes here */
5821#ifdef BCM_ISCSI
5822 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5823 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5824
5825 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5826 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5827#endif
5828 /* Port DQ comes here */
5829
5830 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5831 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5832 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5833 /* no pause for emulation and FPGA */
5834 low = 0;
5835 high = 513;
5836 } else {
5837 if (IS_E1HMF(bp))
5838 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5839 else if (bp->dev->mtu > 4096) {
5840 if (bp->flags & ONE_PORT_FLAG)
5841 low = 160;
5842 else {
5843 val = bp->dev->mtu;
5844 /* (24*1024 + val*4)/256 */
5845 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5846 }
5847 } else
5848 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5849 high = low + 56; /* 14*1024/256 */
5850 }
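	/* Worked example: for a 9000-byte MTU on a two-port device,
	 * low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293; the
	 * /256 in the comments suggests the thresholds are in units of
	 * 256-byte BRB blocks. */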
5851 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5852 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5853
5854
ad8d3948 5855 /* Port PRS comes here */
5856 /* Port TSDM comes here */
5857 /* Port CSDM comes here */
5858 /* Port USDM comes here */
5859 /* Port XSDM comes here */
5860 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5861 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5862 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5863 port ? USEM_PORT1_END : USEM_PORT0_END);
5864 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5865 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5866 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5867 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5868 /* Port UPB comes here */
5869 /* Port XPB comes here */
5870
5871 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5872 port ? PBF_PORT1_END : PBF_PORT0_END);
5873
5874 /* configure PBF to work without PAUSE for MTU 9000 */
34f80b04 5875 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5876
5877 /* update threshold */
34f80b04 5878 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5879 /* update init credit */
34f80b04 5880 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5881
5882 /* probe changes */
34f80b04 5883 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5884 msleep(5);
34f80b04 5885 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5886
5887#ifdef BCM_ISCSI
5888 /* tell the searcher where the T2 table is */
5889 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5890
5891 wb_write[0] = U64_LO(bp->t2_mapping);
5892 wb_write[1] = U64_HI(bp->t2_mapping);
5893 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5894 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5895 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5896 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5897
5898 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5899 /* Port SRCH comes here */
5900#endif
5901 /* Port CDU comes here */
5902 /* Port CFC comes here */
5903
5904 if (CHIP_IS_E1(bp)) {
5905 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5906 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5907 }
5908 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5909 port ? HC_PORT1_END : HC_PORT0_END);
5910
5911 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5912 MISC_AEU_PORT0_START,
5913 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5914 /* init aeu_mask_attn_func_0/1:
5915 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5916 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5917 * bits 4-7 are used for "per vn group attention" */
5918 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5919 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5920
5921 /* Port PXPCS comes here */
5922 /* Port EMAC0 comes here */
5923 /* Port EMAC1 comes here */
5924 /* Port DBU comes here */
5925 /* Port DBG comes here */
5926 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5927 port ? NIG_PORT1_END : NIG_PORT0_END);
5928
5929 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5930
5931 if (CHIP_IS_E1H(bp)) {
5932 /* 0x2 disable e1hov, 0x1 enable */
5933 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5934 (IS_E1HMF(bp) ? 0x1 : 0x2));
5935
5936 /* support pause requests from USDM, TSDM and BRB */
5937 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5938
5939 {
5940 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5941 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5942 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5943 }
5944 }
5945
5946 /* Port MCP comes here */
5947 /* Port DMAE comes here */
5948
35b19ba5 5949 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5950 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5951 {
5952 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5953
5954 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5955 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5956
5957 /* The GPIO should be swapped if the swap register is
5958 set and active */
5959 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5960 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5961
5962 /* Select function upon port-swap configuration */
5963 if (port == 0) {
5964 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5965 aeu_gpio_mask = (swap_val && swap_override) ?
5966 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5967 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5968 } else {
5969 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5970 aeu_gpio_mask = (swap_val && swap_override) ?
5971 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5972 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5973 }
5974 val = REG_RD(bp, offset);
5975 /* add GPIO3 to group */
5976 val |= aeu_gpio_mask;
5977 REG_WR(bp, offset, val);
5978 }
5979 break;
5980
35b19ba5 5981 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5982 /* add SPIO 5 to group 0 */
5983 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5984 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5985 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5986 break;
5987
5988 default:
5989 break;
5990 }
5991
c18487ee 5992 bnx2x__link_reset(bp);
a2fbb9ea 5993
5994 return 0;
5995}
5996
5997#define ILT_PER_FUNC (768/2)
5998#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5999/* the phys address is shifted right 12 bits and has a single
6000 valid bit (1) added as the 53rd bit
6001 then since this is a wide register(TM)
6002 we split it into two 32 bit writes
6003 */
6004#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6005#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6006#define PXP_ONE_ILT(x) (((x) << 10) | x)
6007#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
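/* Illustrative reading of the macros above: ONCHIP_ADDR1() carries
 * address bits 12..43, ONCHIP_ADDR2() carries bits 44 and up plus the
 * valid bit at bit 20 of the upper word; PXP_ONE_ILT(5) evaluates to
 * (5 << 10) | 5 == 0x1405, i.e. a first == last range of one line. */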
6008
6009#define CNIC_ILT_LINES 0
6010
6011static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6012{
6013 int reg;
6014
6015 if (CHIP_IS_E1H(bp))
6016 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6017 else /* E1 */
6018 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6019
6020 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6021}
6022
6023static int bnx2x_init_func(struct bnx2x *bp)
6024{
6025 int port = BP_PORT(bp);
6026 int func = BP_FUNC(bp);
8badd27a 6027 u32 addr, val;
6028 int i;
6029
6030 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6031
6032 /* set MSI reconfigure capability */
6033 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6034 val = REG_RD(bp, addr);
6035 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6036 REG_WR(bp, addr, val);
6037
6038 i = FUNC_ILT_BASE(func);
6039
6040 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6041 if (CHIP_IS_E1H(bp)) {
6042 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6043 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6044 } else /* E1 */
6045 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6046 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6047
6048
6049 if (CHIP_IS_E1H(bp)) {
6050 for (i = 0; i < 9; i++)
6051 bnx2x_init_block(bp,
6052 cm_start[func][i], cm_end[func][i]);
6053
6054 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6055 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6056 }
6057
6058 /* HC init per function */
6059 if (CHIP_IS_E1H(bp)) {
6060 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6061
6062 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6063 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6064 }
6065 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6066
c14423fe 6067 /* Reset PCIE errors for debug */
6068 REG_WR(bp, 0x2114, 0xffffffff);
6069 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6070
6071 return 0;
6072}
6073
6074static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6075{
6076 int i, rc = 0;
a2fbb9ea 6077
6078 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6079 BP_FUNC(bp), load_code);
a2fbb9ea 6080
6081 bp->dmae_ready = 0;
6082 mutex_init(&bp->dmae_mutex);
6083 bnx2x_gunzip_init(bp);
a2fbb9ea 6084
6085 switch (load_code) {
6086 case FW_MSG_CODE_DRV_LOAD_COMMON:
6087 rc = bnx2x_init_common(bp);
6088 if (rc)
6089 goto init_hw_err;
6090 /* no break */
6091
6092 case FW_MSG_CODE_DRV_LOAD_PORT:
6093 bp->dmae_ready = 1;
6094 rc = bnx2x_init_port(bp);
6095 if (rc)
6096 goto init_hw_err;
6097 /* no break */
6098
6099 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6100 bp->dmae_ready = 1;
6101 rc = bnx2x_init_func(bp);
6102 if (rc)
6103 goto init_hw_err;
6104 break;
6105
6106 default:
6107 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6108 break;
6109 }
6110
6111 if (!BP_NOMCP(bp)) {
6112 int func = BP_FUNC(bp);
6113
6114 bp->fw_drv_pulse_wr_seq =
34f80b04 6115 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6116 DRV_PULSE_SEQ_MASK);
6117 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6118 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6119 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6120 } else
6121 bp->func_stx = 0;
a2fbb9ea 6122
6123 /* this needs to be done before gunzip end */
6124 bnx2x_zero_def_sb(bp);
6125 for_each_queue(bp, i)
6126 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6127
6128init_hw_err:
6129 bnx2x_gunzip_end(bp);
6130
6131 return rc;
6132}
6133
c14423fe 6134/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6135static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6136{
34f80b04 6137 int func = BP_FUNC(bp);
6138 u32 seq = ++bp->fw_seq;
6139 u32 rc = 0;
6140 u32 cnt = 1;
6141 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6142
34f80b04 6143 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6144 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6145
6146 do {
6147 /* let the FW do its magic ... */
6148 msleep(delay);
a2fbb9ea 6149
19680c48 6150 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6151
6152 /* Give the FW up to 2 seconds (200*10ms) */
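		/* (on slow emulation/FPGA chips delay is 100ms, so the
		 * same 200 iterations allow up to 20 seconds) */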
6153 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6154
6155 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6156 cnt*delay, rc, seq);
6157
6158 /* is this a reply to our command? */
6159 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6160 rc &= FW_MSG_CODE_MASK;
f1410647 6161
6162 } else {
6163 /* FW BUG! */
6164 BNX2X_ERR("FW failed to respond!\n");
6165 bnx2x_fw_dump(bp);
6166 rc = 0;
6167 }
f1410647 6168
6169 return rc;
6170}
6171
6172static void bnx2x_free_mem(struct bnx2x *bp)
6173{
6174
6175#define BNX2X_PCI_FREE(x, y, size) \
6176 do { \
6177 if (x) { \
6178 pci_free_consistent(bp->pdev, size, x, y); \
6179 x = NULL; \
6180 y = 0; \
6181 } \
6182 } while (0)
6183
6184#define BNX2X_FREE(x) \
6185 do { \
6186 if (x) { \
6187 vfree(x); \
6188 x = NULL; \
6189 } \
6190 } while (0)
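/* The do { } while (0) wrapper makes each helper macro expand to a
 * single statement, so it remains safe inside unbraced if/else
 * bodies. */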
6191
6192 int i;
6193
6194 /* fastpath */
555f6c78 6195 /* Common */
6196 for_each_queue(bp, i) {
6197
555f6c78 6198 /* status blocks */
6199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6200 bnx2x_fp(bp, i, status_blk_mapping),
6201 sizeof(struct host_status_block) +
6202 sizeof(struct eth_tx_db_data));
6203 }
6204 /* Rx */
6205 for_each_rx_queue(bp, i) {
a2fbb9ea 6206
555f6c78 6207 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6208 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6209 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6210 bnx2x_fp(bp, i, rx_desc_mapping),
6211 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6212
6213 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6214 bnx2x_fp(bp, i, rx_comp_mapping),
6215 sizeof(struct eth_fast_path_rx_cqe) *
6216 NUM_RCQ_BD);
a2fbb9ea 6217
7a9b2557 6218 /* SGE ring */
32626230 6219 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6220 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6221 bnx2x_fp(bp, i, rx_sge_mapping),
6222 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6223 }
6224 /* Tx */
6225 for_each_tx_queue(bp, i) {
6226
6227 /* fastpath tx rings: tx_buf tx_desc */
6228 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6229 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6230 bnx2x_fp(bp, i, tx_desc_mapping),
6231 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6232 }
6233 /* end of fastpath */
6234
6235 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6236 sizeof(struct host_def_status_block));
6237
6238 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6239 sizeof(struct bnx2x_slowpath));
6240
6241#ifdef BCM_ISCSI
6242 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6243 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6244 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6245 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6246#endif
7a9b2557 6247 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6248
6249#undef BNX2X_PCI_FREE
6250#undef BNX2X_FREE
6251}
6252
6253static int bnx2x_alloc_mem(struct bnx2x *bp)
6254{
6255
6256#define BNX2X_PCI_ALLOC(x, y, size) \
6257 do { \
6258 x = pci_alloc_consistent(bp->pdev, size, y); \
6259 if (x == NULL) \
6260 goto alloc_mem_err; \
6261 memset(x, 0, size); \
6262 } while (0)
6263
6264#define BNX2X_ALLOC(x, size) \
6265 do { \
6266 x = vmalloc(size); \
6267 if (x == NULL) \
6268 goto alloc_mem_err; \
6269 memset(x, 0, size); \
6270 } while (0)
6271
6272 int i;
6273
6274 /* fastpath */
555f6c78 6275 /* Common */
6276 for_each_queue(bp, i) {
6277 bnx2x_fp(bp, i, bp) = bp;
6278
555f6c78 6279 /* status blocks */
6280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6281 &bnx2x_fp(bp, i, status_blk_mapping),
6282 sizeof(struct host_status_block) +
6283 sizeof(struct eth_tx_db_data));
6284 }
6285 /* Rx */
6286 for_each_rx_queue(bp, i) {
a2fbb9ea 6287
555f6c78 6288 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6289 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6290 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6291 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6292 &bnx2x_fp(bp, i, rx_desc_mapping),
6293 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6294
6295 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6296 &bnx2x_fp(bp, i, rx_comp_mapping),
6297 sizeof(struct eth_fast_path_rx_cqe) *
6298 NUM_RCQ_BD);
6299
6300 /* SGE ring */
6301 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6302 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6303 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6304 &bnx2x_fp(bp, i, rx_sge_mapping),
6305 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6306 }
6307 /* Tx */
6308 for_each_tx_queue(bp, i) {
6309
6310 bnx2x_fp(bp, i, hw_tx_prods) =
6311 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6312
6313 bnx2x_fp(bp, i, tx_prods_mapping) =
6314 bnx2x_fp(bp, i, status_blk_mapping) +
6315 sizeof(struct host_status_block);
6316
6317 /* fastpath tx rings: tx_buf tx_desc */
6318 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6319 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6320 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6321 &bnx2x_fp(bp, i, tx_desc_mapping),
6322 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6323 }
6324 /* end of fastpath */
6325
6326 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6327 sizeof(struct host_def_status_block));
6328
6329 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6330 sizeof(struct bnx2x_slowpath));
6331
6332#ifdef BCM_ISCSI
6333 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6334
6335 /* Initialize T1 */
6336 for (i = 0; i < 64*1024; i += 64) {
6337 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6338 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6339 }
6340
6341 /* allocate searcher T2 table
6342 we allocate 1/4 of alloc num for T2
6343 (which is not entered into the ILT) */
6344 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6345
6346 /* Initialize T2 */
6347 for (i = 0; i < 16*1024; i += 64)
6348 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6349
c14423fe 6350 /* now fixup the last line in the block to point to the next block */
6351 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
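	/* Each 64-byte T2 entry thus ends with the physical address of
	 * the next entry, giving the searcher a walkable list; the write
	 * above makes the final entry wrap back to the start of the
	 * table. */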
6352
6353 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6354 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6355
6356 /* QM queues (128*MAX_CONN) */
6357 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6358#endif
6359
6360 /* Slow path ring */
6361 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6362
6363 return 0;
6364
6365alloc_mem_err:
6366 bnx2x_free_mem(bp);
6367 return -ENOMEM;
6368
6369#undef BNX2X_PCI_ALLOC
6370#undef BNX2X_ALLOC
6371}
6372
6373static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6374{
6375 int i;
6376
555f6c78 6377 for_each_tx_queue(bp, i) {
6378 struct bnx2x_fastpath *fp = &bp->fp[i];
6379
6380 u16 bd_cons = fp->tx_bd_cons;
6381 u16 sw_prod = fp->tx_pkt_prod;
6382 u16 sw_cons = fp->tx_pkt_cons;
6383
6384 while (sw_cons != sw_prod) {
6385 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6386 sw_cons++;
6387 }
6388 }
6389}
6390
6391static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6392{
6393 int i, j;
6394
555f6c78 6395 for_each_rx_queue(bp, j) {
6396 struct bnx2x_fastpath *fp = &bp->fp[j];
6397
6398 for (i = 0; i < NUM_RX_BD; i++) {
6399 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6400 struct sk_buff *skb = rx_buf->skb;
6401
6402 if (skb == NULL)
6403 continue;
6404
6405 pci_unmap_single(bp->pdev,
6406 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6407 bp->rx_buf_size,
6408 PCI_DMA_FROMDEVICE);
6409
6410 rx_buf->skb = NULL;
6411 dev_kfree_skb(skb);
6412 }
7a9b2557 6413 if (!fp->disable_tpa)
6414 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6415 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6416 ETH_MAX_AGGREGATION_QUEUES_E1H);
6417 }
6418}
6419
6420static void bnx2x_free_skbs(struct bnx2x *bp)
6421{
6422 bnx2x_free_tx_skbs(bp);
6423 bnx2x_free_rx_skbs(bp);
6424}
6425
6426static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6427{
34f80b04 6428 int i, offset = 1;
6429
6430 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6431 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6432 bp->msix_table[0].vector);
6433
6434 for_each_queue(bp, i) {
c14423fe 6435 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6436 "state %x\n", i, bp->msix_table[i + offset].vector,
6437 bnx2x_fp(bp, i, state));
6438
34f80b04 6439 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6440 }
6441}
6442
6443static void bnx2x_free_irq(struct bnx2x *bp)
6444{
a2fbb9ea 6445 if (bp->flags & USING_MSIX_FLAG) {
6446 bnx2x_free_msix_irqs(bp);
6447 pci_disable_msix(bp->pdev);
6448 bp->flags &= ~USING_MSIX_FLAG;
6449
6450 } else if (bp->flags & USING_MSI_FLAG) {
6451 free_irq(bp->pdev->irq, bp->dev);
6452 pci_disable_msi(bp->pdev);
6453 bp->flags &= ~USING_MSI_FLAG;
6454
6455 } else
6456 free_irq(bp->pdev->irq, bp->dev);
6457}
6458
6459static int bnx2x_enable_msix(struct bnx2x *bp)
6460{
6461 int i, rc, offset = 1;
6462 int igu_vec = 0;
a2fbb9ea 6463
6464 bp->msix_table[0].entry = igu_vec;
6465 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6466
34f80b04 6467 for_each_queue(bp, i) {
8badd27a 6468 igu_vec = BP_L_ID(bp) + offset + i;
6469 bp->msix_table[i + offset].entry = igu_vec;
6470 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6471 "(fastpath #%u)\n", i + offset, igu_vec, i);
6472 }
6473
34f80b04 6474 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6475 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6476 if (rc) {
6477 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6478 return rc;
34f80b04 6479 }
8badd27a 6480
6481 bp->flags |= USING_MSIX_FLAG;
6482
6483 return 0;
6484}
6485
6486static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6487{
34f80b04 6488 int i, rc, offset = 1;
a2fbb9ea 6489
6490 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6491 bp->dev->name, bp->dev);
6492 if (rc) {
6493 BNX2X_ERR("request sp irq failed\n");
6494 return -EBUSY;
6495 }
6496
6497 for_each_queue(bp, i) {
6498 struct bnx2x_fastpath *fp = &bp->fp[i];
6499
6500 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6501 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6502 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6503 if (rc) {
555f6c78 6504 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6505 bnx2x_free_msix_irqs(bp);
6506 return -EBUSY;
6507 }
6508
555f6c78 6509 fp->state = BNX2X_FP_STATE_IRQ;
6510 }
6511
6512 i = BNX2X_NUM_QUEUES(bp);
6513 if (is_multi(bp))
6514 printk(KERN_INFO PFX
6515 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6516 bp->dev->name, bp->msix_table[0].vector,
6517 bp->msix_table[offset].vector,
6518 bp->msix_table[offset + i - 1].vector);
6519 else
6520 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6521 bp->dev->name, bp->msix_table[0].vector,
6522 bp->msix_table[offset + i - 1].vector);
6523
a2fbb9ea 6524 return 0;
6525}
6526
6527static int bnx2x_enable_msi(struct bnx2x *bp)
6528{
6529 int rc;
6530
6531 rc = pci_enable_msi(bp->pdev);
6532 if (rc) {
6533 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6534 return -1;
6535 }
6536 bp->flags |= USING_MSI_FLAG;
6537
6538 return 0;
6539}
6540
6541static int bnx2x_req_irq(struct bnx2x *bp)
6542{
8badd27a 6543 unsigned long flags;
34f80b04 6544 int rc;
a2fbb9ea 6545
6546 if (bp->flags & USING_MSI_FLAG)
6547 flags = 0;
6548 else
6549 flags = IRQF_SHARED;
6550
6551 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6552 bp->dev->name, bp->dev);
6553 if (!rc)
6554 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6555
6556 return rc;
6557}
6558
6559static void bnx2x_napi_enable(struct bnx2x *bp)
6560{
6561 int i;
6562
555f6c78 6563 for_each_rx_queue(bp, i)
6564 napi_enable(&bnx2x_fp(bp, i, napi));
6565}
6566
6567static void bnx2x_napi_disable(struct bnx2x *bp)
6568{
6569 int i;
6570
555f6c78 6571 for_each_rx_queue(bp, i)
6572 napi_disable(&bnx2x_fp(bp, i, napi));
6573}
6574
6575static void bnx2x_netif_start(struct bnx2x *bp)
6576{
6577 if (atomic_dec_and_test(&bp->intr_sem)) {
6578 if (netif_running(bp->dev)) {
6579 bnx2x_napi_enable(bp);
6580 bnx2x_int_enable(bp);
6581 if (bp->state == BNX2X_STATE_OPEN)
6582 netif_tx_wake_all_queues(bp->dev);
6583 }
6584 }
6585}
6586
f8ef6e44 6587static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6588{
f8ef6e44 6589 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6590 bnx2x_napi_disable(bp);
65abd74d 6591 if (netif_running(bp->dev)) {
6592 netif_tx_disable(bp->dev);
6593 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6594 }
6595}
6596
6597/*
6598 * Init service functions
6599 */
6600
3101c2bc 6601static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6602{
6603 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6604 int port = BP_PORT(bp);
6605
6606 /* CAM allocation
6607 * unicasts 0-31:port0 32-63:port1
6608 * multicast 64-127:port0 128-191:port1
6609 */
8d9c5f34 6610 config->hdr.length = 2;
af246401 6611 config->hdr.offset = port ? 32 : 0;
0626b899 6612 config->hdr.client_id = bp->fp->cl_id;
6613 config->hdr.reserved1 = 0;
6614
6615 /* primary MAC */
6616 config->config_table[0].cam_entry.msb_mac_addr =
6617 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6618 config->config_table[0].cam_entry.middle_mac_addr =
6619 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6620 config->config_table[0].cam_entry.lsb_mac_addr =
6621 swab16(*(u16 *)&bp->dev->dev_addr[4]);
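	/* Worked example (assuming a little-endian host): for the MAC
	 * 00:11:22:33:44:55 the swab16() loads above yield msb 0x0011,
	 * middle 0x2233 and lsb 0x4455, i.e. the CAM takes the address
	 * as three big-endian 16-bit words. */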
34f80b04 6622 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6623 if (set)
6624 config->config_table[0].target_table_entry.flags = 0;
6625 else
6626 CAM_INVALIDATE(config->config_table[0]);
6627 config->config_table[0].target_table_entry.client_id = 0;
6628 config->config_table[0].target_table_entry.vlan_id = 0;
6629
6630 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6631 (set ? "setting" : "clearing"),
6632 config->config_table[0].cam_entry.msb_mac_addr,
6633 config->config_table[0].cam_entry.middle_mac_addr,
6634 config->config_table[0].cam_entry.lsb_mac_addr);
6635
6636 /* broadcast */
6637 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6638 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6639 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6640 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6641 if (set)
6642 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6643 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6644 else
6645 CAM_INVALIDATE(config->config_table[1]);
6646 config->config_table[1].target_table_entry.client_id = 0;
6647 config->config_table[1].target_table_entry.vlan_id = 0;
6648
6649 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6650 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6651 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6652}
6653
3101c2bc 6654static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6655{
6656 struct mac_configuration_cmd_e1h *config =
6657 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6658
3101c2bc 6659 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6660 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6661 return;
6662 }
6663
6664 /* CAM allocation for E1H
6665 * unicasts: by func number
6666 * multicast: 20+FUNC*20, 20 each
6667 */
8d9c5f34 6668 config->hdr.length = 1;
34f80b04 6669 config->hdr.offset = BP_FUNC(bp);
0626b899 6670 config->hdr.client_id = bp->fp->cl_id;
6671 config->hdr.reserved1 = 0;
6672
6673 /* primary MAC */
6674 config->config_table[0].msb_mac_addr =
6675 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6676 config->config_table[0].middle_mac_addr =
6677 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6678 config->config_table[0].lsb_mac_addr =
6679 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6680 config->config_table[0].client_id = BP_L_ID(bp);
6681 config->config_table[0].vlan_id = 0;
6682 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6683 if (set)
6684 config->config_table[0].flags = BP_PORT(bp);
6685 else
6686 config->config_table[0].flags =
6687 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6688
3101c2bc
YG
6689 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6690 (set ? "setting" : "clearing"),
34f80b04
EG
6691 config->config_table[0].msb_mac_addr,
6692 config->config_table[0].middle_mac_addr,
6693 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6694
6695 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6696 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6697 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6698}
6699
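/* Poll *state_p until it reaches 'state' or roughly five seconds elapse.
 * Ramrod completions are posted by bnx2x_sp_event(); in 'poll' mode the
 * Rx rings are serviced explicitly since interrupts may not be running
 * yet.  Returns 0 on success, -EBUSY on timeout.
 */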
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if the index is different from 0,
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

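/* Bring up the leading (default) connection: re-arm the IGU status block,
 * post the PORT_SETUP ramrod and wait for the state machine to reach OPEN.
 * bnx2x_setup_multi() below does the same for a non-default connection.
 */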
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

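/* Decide how many Rx/Tx queues to use for the requested interrupt mode:
 * a single queue for INT#x/MSI, up to one queue per online CPU for MSI-X
 * (falling back to one queue if MSI-X cannot be enabled).
 */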
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X, we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

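/* Bring the NIC up: allocate memory, request IRQs, negotiate the load
 * type with the MCP (COMMON/PORT/FUNCTION), init the HW, set the MAC and
 * open the fastpath queues.  On failure the load_error labels unwind in
 * reverse order of acquisition.
 */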
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP.
	   The MCP replies with the type of LOAD command:
	   if this is the first port to be initialized,
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

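/* Tear down one non-default connection: HALT ramrod, then CFC delete,
 * each followed by a synchronous wait for its completion.
 */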
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

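/* Tear down the leading connection.  PORT_DELETE has no state-machine
 * completion, so it is tracked by watching the default status block
 * producer advance.
 */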
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

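/* Per-function reset: mask the IGU edges and clear this function's ILT
 * window.  bnx2x_reset_port() and bnx2x_reset_chip() below widen the
 * scope to the port and, for a COMMON unload, the whole device.
 */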
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

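/* Take the NIC down: stop the Tx/Rx paths, flush pending Tx work,
 * invalidate the MAC/multicast CAM, negotiate the unload type with the
 * MCP (or emulate it via load_count when there is no MCP) and reset the
 * chip accordingly.
 */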
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_mb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_mb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

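/* The PXP "pretend" register lets a PCI function issue GRC accesses as if
 * it were another function; it is used below to disable the interrupts of
 * a leftover pre-boot (UNDI) driver instance.
 */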
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

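/* Detect a pre-boot (UNDI) driver left active by the boot ROM and unload
 * it cleanly: handshake the unload with the MCP on both ports, silence
 * its interrupts, drain the BRB and reset the device while preserving
 * the NIG port-swap strapping.
 */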
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

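/* Read the board-wide identification from the chip and shared memory
 * (shmem).  The 32-bit chip_id is assembled as
 * chip_num[31:16] | rev[15:12] | metal[11:4] | bond_id[3:0];
 * for example, a chip_num of 0x164e with all other fields zero gives a
 * chip ID of 0x164e0000.
 */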
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

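/* Build the ethtool 'supported' mask for this port from the external PHY
 * type recorded in NVRAM, then trim it by the NVRAM speed capability mask.
 */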
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

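/* Translate the NVRAM link_config into requested line speed, duplex and
 * flow control, and the matching ethtool advertising mask.  Invalid
 * combinations are reported as NVRAM config errors.
 */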
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

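/* Gather per-port configuration from shmem: lane/PHY config, speed
 * capabilities, XGXS lane equalization values, WoL default and the port
 * MAC address.
 */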
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

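/* One-time driver-private initialization at probe time: locks, work
 * items, HW info, module-parameter derived settings (multi-queue, TPA,
 * MRRS) and the periodic timer.
 */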
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

8309static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8310{
8311 struct bnx2x *bp = netdev_priv(dev);
8312 u32 advertising;
8313
34f80b04
EG
8314 if (IS_E1HMF(bp))
8315 return 0;
8316
a2fbb9ea
ET
8317 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8318 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8319 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8320 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8321 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8322 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8323 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8324
a2fbb9ea 8325 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8326 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8327 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8328 return -EINVAL;
f1410647 8329 }
a2fbb9ea
ET
8330
8331 /* advertise the requested speed and duplex if supported */
34f80b04 8332 cmd->advertising &= bp->port.supported;
a2fbb9ea 8333
c18487ee
YR
8334 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8335 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8336 bp->port.advertising |= (ADVERTISED_Autoneg |
8337 cmd->advertising);
a2fbb9ea
ET
8338
8339 } else { /* forced speed */
8340 /* advertise the requested speed and duplex if supported */
8341 switch (cmd->speed) {
8342 case SPEED_10:
8343 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8344 if (!(bp->port.supported &
f1410647
ET
8345 SUPPORTED_10baseT_Full)) {
8346 DP(NETIF_MSG_LINK,
8347 "10M full not supported\n");
a2fbb9ea 8348 return -EINVAL;
f1410647 8349 }
a2fbb9ea
ET
8350
8351 advertising = (ADVERTISED_10baseT_Full |
8352 ADVERTISED_TP);
8353 } else {
34f80b04 8354 if (!(bp->port.supported &
f1410647
ET
8355 SUPPORTED_10baseT_Half)) {
8356 DP(NETIF_MSG_LINK,
8357 "10M half not supported\n");
a2fbb9ea 8358 return -EINVAL;
f1410647 8359 }
a2fbb9ea
ET
8360
8361 advertising = (ADVERTISED_10baseT_Half |
8362 ADVERTISED_TP);
8363 }
8364 break;
8365
8366 case SPEED_100:
8367 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8368 if (!(bp->port.supported &
f1410647
ET
8369 SUPPORTED_100baseT_Full)) {
8370 DP(NETIF_MSG_LINK,
8371 "100M full not supported\n");
a2fbb9ea 8372 return -EINVAL;
f1410647 8373 }
a2fbb9ea
ET
8374
8375 advertising = (ADVERTISED_100baseT_Full |
8376 ADVERTISED_TP);
8377 } else {
34f80b04 8378 if (!(bp->port.supported &
f1410647
ET
8379 SUPPORTED_100baseT_Half)) {
8380 DP(NETIF_MSG_LINK,
8381 "100M half not supported\n");
a2fbb9ea 8382 return -EINVAL;
f1410647 8383 }
a2fbb9ea
ET
8384
8385 advertising = (ADVERTISED_100baseT_Half |
8386 ADVERTISED_TP);
8387 }
8388 break;
8389
8390 case SPEED_1000:
f1410647
ET
8391 if (cmd->duplex != DUPLEX_FULL) {
8392 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8393 return -EINVAL;
f1410647 8394 }
a2fbb9ea 8395
34f80b04 8396 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8397 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8398 return -EINVAL;
f1410647 8399 }
a2fbb9ea
ET
8400
8401 advertising = (ADVERTISED_1000baseT_Full |
8402 ADVERTISED_TP);
8403 break;
8404
8405 case SPEED_2500:
f1410647
ET
8406 if (cmd->duplex != DUPLEX_FULL) {
8407 DP(NETIF_MSG_LINK,
8408 "2.5G half not supported\n");
a2fbb9ea 8409 return -EINVAL;
f1410647 8410 }
a2fbb9ea 8411
34f80b04 8412 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8413 DP(NETIF_MSG_LINK,
8414 "2.5G full not supported\n");
a2fbb9ea 8415 return -EINVAL;
f1410647 8416 }
a2fbb9ea 8417
f1410647 8418 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8419 ADVERTISED_TP);
8420 break;
8421
8422 case SPEED_10000:
f1410647
ET
8423 if (cmd->duplex != DUPLEX_FULL) {
8424 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8425 return -EINVAL;
f1410647 8426 }
a2fbb9ea 8427
34f80b04 8428 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8429 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8430 return -EINVAL;
f1410647 8431 }
a2fbb9ea
ET
8432
8433 advertising = (ADVERTISED_10000baseT_Full |
8434 ADVERTISED_FIBRE);
8435 break;
8436
8437 default:
f1410647 8438 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8439 return -EINVAL;
8440 }
8441
c18487ee
YR
8442 bp->link_params.req_line_speed = cmd->speed;
8443 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8444 bp->port.advertising = advertising;
a2fbb9ea
ET
8445 }
8446
c18487ee 8447 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8448 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8449 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8450 bp->port.advertising);
a2fbb9ea 8451
34f80b04 8452 if (netif_running(dev)) {
bb2a0f7a 8453 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8454 bnx2x_link_set(bp);
8455 }
a2fbb9ea
ET
8456
8457 return 0;
8458}
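
/*
 * Example (editor's sketch, not part of the original source): forcing
 * 1G full duplex from user space,
 *
 *	ethtool -s eth0 speed 1000 duplex full autoneg off
 *
 * reaches bnx2x_set_settings() with cmd->autoneg == AUTONEG_DISABLE,
 * cmd->speed == SPEED_1000 and cmd->duplex == DUPLEX_FULL, i.e. the
 * SPEED_1000 case of the forced-speed switch above.
 */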

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
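
/*
 * Editor's note: the four helpers above always bracket the actual NVRAM
 * command cycles; the read/write routines below follow this pattern:
 *
 *	rc = bnx2x_acquire_nvram_lock(bp);	(per-port SW arbitration)
 *	if (rc)
 *		return rc;
 *	bnx2x_enable_nvram_access(bp);
 *	... issue MCPR_NVM_COMMAND_FIRST/.../LAST dword cycles ...
 *	bnx2x_disable_nvram_access(bp);
 *	bnx2x_release_nvram_lock(bp);
 */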

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes -
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

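/*
 * Worked example (editor's illustration): writing one byte at NVRAM
 * offset 0x102 uses align_offset 0x100 and BYTE_OFFSET(0x102) == 16,
 * so bnx2x_nvram_write1() below reads the dword at 0x100, clears bits
 * 23:16, merges the new byte there and writes the dword back.
 */
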
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
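
/*
 * Editor's note on the flag juggling above: NVRAM programs in pages of
 * NVRAM_PAGE_SIZE bytes, so MCPR_NVM_COMMAND_LAST is raised both on the
 * final dword of the buffer and on the last dword of each page, and
 * MCPR_NVM_COMMAND_FIRST is raised again at every page start.
 */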

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;
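	/* NB (editor's note): the Rx bound above is decimal 3000 while the
	 * Tx bound is hex 0x3000, i.e. 12288 usec */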

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
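
/*
 * Editor's note: the test above is a plain write/read-back probe - for
 * each table entry the per-port register at offset0 + port * offset1 is
 * written with the pattern, read back, restored from save_val, and the
 * comparison is made under the entry's read/write mask.
 */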

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

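/* editor's note: each nvram_tbl area below is stored with a trailing
 * CRC32; running a little-endian CRC over the whole area (data plus
 * stored CRC) yields the well-known residual above when it is intact */
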
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
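
/*
 * Editor's note: the buf[] slots filled above map onto
 * bnx2x_tests_str_arr: 0 register, 1 memory, 2 loopback, 3 nvram,
 * 4 interrupt, 5 link.
 */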

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
							8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
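
/*
 * Example (editor's sketch): "ethtool -p eth0 5" lands here with
 * data == 5, giving ten half-second phases of alternating LED ON/OFF
 * before the final bnx2x_set_led() call restores the LED to the
 * current link state.
 */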

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
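
/*
 * Editor's note: the increment above skips the last entry of an RCQ
 * page, which holds the next-page pointer rather than a completion, so
 * a page boundary is never mistaken for pending work.
 */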

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
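
/*
 * Editor's sketch of bnx2x_csum_fix(): the stack's partial checksum
 * starts at the transport header, while the parsing BD may describe a
 * window that starts "fix" bytes earlier (fix > 0) or later (fix < 0);
 * the helper folds those bytes out of (or into) the sum and byte-swaps
 * the result into the BD's expected layout.
 */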

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
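
/*
 * Sketch (editor's illustration): a CHECKSUM_PARTIAL TCPv4 GSO skb
 * yields (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4); a plain UDPv4
 * datagram with no checksum offload requested yields XMIT_PLAIN.
 */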
10172
632da4d6 10173#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10174/* check if packet requires linearization (packet is too fragmented)
10175 no need to check fragmentation if page size > 8K (there will be no
10176 violation to FW restrictions) */
755735eb
EG
10177static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10178 u32 xmit_type)
10179{
10180 int to_copy = 0;
10181 int hlen = 0;
10182 int first_bd_sz = 0;
10183
10184 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10185 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10186
10187 if (xmit_type & XMIT_GSO) {
10188 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10189 /* Check if LSO packet needs to be copied:
10190 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10191 int wnd_size = MAX_FETCH_BD - 3;
33471629 10192 /* Number of windows to check */
755735eb
EG
10193 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10194 int wnd_idx = 0;
10195 int frag_idx = 0;
10196 u32 wnd_sum = 0;
10197
10198 /* Headers length */
10199 hlen = (int)(skb_transport_header(skb) - skb->data) +
10200 tcp_hdrlen(skb);
10201
 10202 /* Amount of data (w/o headers) on the linear part of the SKB */
10203 first_bd_sz = skb_headlen(skb) - hlen;
10204
10205 wnd_sum = first_bd_sz;
10206
10207 /* Calculate the first sum - it's special */
10208 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10209 wnd_sum +=
10210 skb_shinfo(skb)->frags[frag_idx].size;
10211
10212 /* If there was data on linear skb data - check it */
10213 if (first_bd_sz > 0) {
10214 if (unlikely(wnd_sum < lso_mss)) {
10215 to_copy = 1;
10216 goto exit_lbl;
10217 }
10218
10219 wnd_sum -= first_bd_sz;
10220 }
10221
10222 /* Others are easier: run through the frag list and
10223 check all windows */
10224 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10225 wnd_sum +=
10226 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10227
10228 if (unlikely(wnd_sum < lso_mss)) {
10229 to_copy = 1;
10230 break;
10231 }
10232 wnd_sum -=
10233 skb_shinfo(skb)->frags[wnd_idx].size;
10234 }
10235
10236 } else {
 10237 /* a non-LSO packet that is too fragmented must always
 10238 be linearized */
10239 to_copy = 1;
10240 }
10241 }
10242
10243exit_lbl:
10244 if (unlikely(to_copy))
10245 DP(NETIF_MSG_TX_QUEUED,
10246 "Linearization IS REQUIRED for %s packet. "
10247 "num_frags %d hlen %d first_bd_sz %d\n",
10248 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10249 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10250
10251 return to_copy;
10252}
632da4d6 10253#endif
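/* The check above enforces a FW fetch limit: every lso_mss bytes of
 * payload must be coverable by at most wnd_size BDs.  The same
 * invariant, isolated as a driver-agnostic sketch (sizes[] stands in
 * for the frag sizes; the special first window over the linear data is
 * omitted for brevity):
 */
static int example_windows_ok(const u32 *sizes, int nfrags,
			      int wnd_size, u32 lso_mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += sizes[i];
		if (i >= wnd_size)
			wnd_sum -= sizes[i - wnd_size];	/* slide the window */
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return 0;	/* too fragmented: linearize */
	}
	return 1;
}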
755735eb
EG
10254
10255/* called with netif_tx_lock
a2fbb9ea 10256 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10257 * netif_wake_queue()
a2fbb9ea
ET
10258 */
10259static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10260{
10261 struct bnx2x *bp = netdev_priv(dev);
10262 struct bnx2x_fastpath *fp;
555f6c78 10263 struct netdev_queue *txq;
a2fbb9ea
ET
10264 struct sw_tx_bd *tx_buf;
10265 struct eth_tx_bd *tx_bd;
10266 struct eth_tx_parse_bd *pbd = NULL;
10267 u16 pkt_prod, bd_prod;
755735eb 10268 int nbd, fp_index;
a2fbb9ea 10269 dma_addr_t mapping;
755735eb
EG
10270 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10271 int vlan_off = (bp->e1hov ? 4 : 0);
10272 int i;
10273 u8 hlen = 0;
a2fbb9ea
ET
10274
10275#ifdef BNX2X_STOP_ON_ERROR
10276 if (unlikely(bp->panic))
10277 return NETDEV_TX_BUSY;
10278#endif
10279
555f6c78
EG
10280 fp_index = skb_get_queue_mapping(skb);
10281 txq = netdev_get_tx_queue(dev, fp_index);
10282
a2fbb9ea 10283 fp = &bp->fp[fp_index];
755735eb 10284
231fd58a 10285 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10286 fp->eth_q_stats.driver_xoff++;
555f6c78 10287 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10288 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10289 return NETDEV_TX_BUSY;
10290 }
10291
755735eb
EG
10292 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10293 " gso type %x xmit_type %x\n",
10294 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10295 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10296
632da4d6 10297#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
 10298 /* First, check if we need to linearize the skb (due to FW
 10299 restrictions). No need to check fragmentation if page size > 8K
 10300 (there will be no violation of FW restrictions) */
755735eb
EG
10301 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10302 /* Statistics of linearization */
10303 bp->lin_cnt++;
10304 if (skb_linearize(skb) != 0) {
10305 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10306 "silently dropping this SKB\n");
10307 dev_kfree_skb_any(skb);
da5a662a 10308 return NETDEV_TX_OK;
755735eb
EG
10309 }
10310 }
632da4d6 10311#endif
755735eb 10312
a2fbb9ea 10313 /*
755735eb 10314 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10315 then for TSO or xsum we have a parsing info BD,
755735eb 10316 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10317 (don't forget to mark the last one as last,
10318 and to unmap only AFTER you write to the BD ...)
755735eb 10319 And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10320 */
10321
10322 pkt_prod = fp->tx_pkt_prod++;
755735eb 10323 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10324
755735eb 10325 /* get a tx_buf and first BD */
a2fbb9ea
ET
10326 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10327 tx_bd = &fp->tx_desc_ring[bd_prod];
10328
10329 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10330 tx_bd->general_data = (UNICAST_ADDRESS <<
10331 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10332 /* header nbd */
10333 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10334
755735eb
EG
10335 /* remember the first BD of the packet */
10336 tx_buf->first_bd = fp->tx_bd_prod;
10337 tx_buf->skb = skb;
a2fbb9ea
ET
10338
10339 DP(NETIF_MSG_TX_QUEUED,
10340 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10341 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10342
0c6671b0
EG
10343#ifdef BCM_VLAN
10344 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10345 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10346 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10347 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10348 vlan_off += 4;
10349 } else
0c6671b0 10350#endif
755735eb 10351 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10352
755735eb 10353 if (xmit_type) {
755735eb 10354 /* turn on parsing and get a BD */
a2fbb9ea
ET
10355 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10356 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10357
10358 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10359 }
10360
10361 if (xmit_type & XMIT_CSUM) {
10362 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10363
10364 /* for now NS flag is not used in Linux */
4781bfad
EG
10365 pbd->global_data =
10366 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10367 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10368
755735eb
EG
10369 pbd->ip_hlen = (skb_transport_header(skb) -
10370 skb_network_header(skb)) / 2;
10371
10372 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10373
755735eb
EG
10374 pbd->total_hlen = cpu_to_le16(hlen);
10375 hlen = hlen*2 - vlan_off;
a2fbb9ea 10376
755735eb
EG
10377 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10378
10379 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10380 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10381 ETH_TX_BD_FLAGS_IP_CSUM;
10382 else
10383 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10384
10385 if (xmit_type & XMIT_CSUM_TCP) {
10386 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10387
10388 } else {
10389 s8 fix = SKB_CS_OFF(skb); /* signed! */
10390
a2fbb9ea 10391 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10392 pbd->cs_offset = fix / 2;
a2fbb9ea 10393
755735eb
EG
10394 DP(NETIF_MSG_TX_QUEUED,
10395 "hlen %d offset %d fix %d csum before fix %x\n",
10396 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10397 SKB_CS(skb));
10398
10399 /* HW bug: fixup the CSUM */
10400 pbd->tcp_pseudo_csum =
10401 bnx2x_csum_fix(skb_transport_header(skb),
10402 SKB_CS(skb), fix);
10403
10404 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10405 pbd->tcp_pseudo_csum);
10406 }
a2fbb9ea
ET
10407 }
10408
10409 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10410 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10411
10412 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10413 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10414 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10415 tx_bd->nbd = cpu_to_le16(nbd);
10416 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10417
10418 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10419 " nbytes %d flags %x vlan %x\n",
10420 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10421 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10422 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10423
755735eb 10424 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10425
10426 DP(NETIF_MSG_TX_QUEUED,
10427 "TSO packet len %d hlen %d total len %d tso size %d\n",
10428 skb->len, hlen, skb_headlen(skb),
10429 skb_shinfo(skb)->gso_size);
10430
10431 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10432
755735eb
EG
10433 if (unlikely(skb_headlen(skb) > hlen))
10434 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10435 bd_prod, ++nbd);
a2fbb9ea
ET
10436
10437 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10438 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10439 pbd->tcp_flags = pbd_tcp_flags(skb);
10440
10441 if (xmit_type & XMIT_GSO_V4) {
10442 pbd->ip_id = swab16(ip_hdr(skb)->id);
10443 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10444 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10445 ip_hdr(skb)->daddr,
10446 0, IPPROTO_TCP, 0));
755735eb
EG
10447
10448 } else
10449 pbd->tcp_pseudo_csum =
10450 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10451 &ipv6_hdr(skb)->daddr,
10452 0, IPPROTO_TCP, 0));
10453
a2fbb9ea
ET
10454 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10455 }
10456
755735eb
EG
10457 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10458 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10459
755735eb
EG
10460 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10461 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10462
755735eb
EG
10463 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10464 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10465
755735eb
EG
10466 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10467 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10468 tx_bd->nbytes = cpu_to_le16(frag->size);
10469 tx_bd->vlan = cpu_to_le16(pkt_prod);
10470 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10471
755735eb
EG
10472 DP(NETIF_MSG_TX_QUEUED,
10473 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10474 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10475 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10476 }
10477
755735eb 10478 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10479 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10480
10481 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10482 tx_bd, tx_bd->bd_flags.as_bitfield);
10483
a2fbb9ea
ET
10484 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10485
755735eb 10486 /* now send a tx doorbell, counting the next-page BD
a2fbb9ea
ET
 10487 * if the packet contains or ends with it
10488 */
10489 if (TX_BD_POFF(bd_prod) < nbd)
10490 nbd++;
10491
10492 if (pbd)
10493 DP(NETIF_MSG_TX_QUEUED,
10494 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10495 " tcp_flags %x xsum %x seq %u hlen %u\n",
10496 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10497 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10498 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10499
755735eb 10500 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10501
58f4c4cf
EG
10502 /*
10503 * Make sure that the BD data is updated before updating the producer
10504 * since FW might read the BD right after the producer is updated.
10505 * This is only applicable for weak-ordered memory model archs such
 10506 * as IA-64. The following barrier is also mandatory since the FW
 10507 * assumes that packets always have BDs.
10508 */
10509 wmb();
10510
4781bfad 10511 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
a2fbb9ea 10512 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 10513 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 10514 DOORBELL(bp, fp->index, 0);
a2fbb9ea
ET
10515
10516 mmiowb();
10517
755735eb 10518 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10519 dev->trans_start = jiffies;
10520
10521 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10522 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10523 if we put Tx into XOFF state. */
10524 smp_mb();
555f6c78 10525 netif_tx_stop_queue(txq);
de832a55 10526 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10527 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10528 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10529 }
10530 fp->tx_pkt++;
10531
10532 return NETDEV_TX_OK;
10533}
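/* A condensed sketch of the producer/doorbell ordering used at the end
 * of bnx2x_start_xmit() above; the struct layout and names here are
 * illustrative, the barriers are the point.
 */
struct example_hw_tx_prods {
	__le16 bds_prod;
	__le32 packets_prod;
};

static void example_ring_doorbell(struct bnx2x *bp,
				  struct example_hw_tx_prods *prods,
				  int index, int nbd)
{
	wmb();		/* BDs must be visible before the producers */
	le16_add_cpu(&prods->bds_prod, nbd);
	mb();		/* FW restriction: nbd before the packet count */
	le32_add_cpu(&prods->packets_prod, 1);
	DOORBELL(bp, index, 0);
	mmiowb();	/* keep the doorbell ordered vs. later MMIO */
}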
10534
bb2a0f7a 10535/* called with rtnl_lock */
a2fbb9ea
ET
10536static int bnx2x_open(struct net_device *dev)
10537{
10538 struct bnx2x *bp = netdev_priv(dev);
10539
6eccabb3
EG
10540 netif_carrier_off(dev);
10541
a2fbb9ea
ET
10542 bnx2x_set_power_state(bp, PCI_D0);
10543
bb2a0f7a 10544 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10545}
10546
bb2a0f7a 10547/* called with rtnl_lock */
a2fbb9ea
ET
10548static int bnx2x_close(struct net_device *dev)
10549{
a2fbb9ea
ET
10550 struct bnx2x *bp = netdev_priv(dev);
10551
10552 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10553 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10554 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10555 if (!CHIP_REV_IS_SLOW(bp))
10556 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10557
10558 return 0;
10559}
10560
f5372251 10561/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
10562static void bnx2x_set_rx_mode(struct net_device *dev)
10563{
10564 struct bnx2x *bp = netdev_priv(dev);
10565 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10566 int port = BP_PORT(bp);
10567
10568 if (bp->state != BNX2X_STATE_OPEN) {
10569 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10570 return;
10571 }
10572
10573 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10574
10575 if (dev->flags & IFF_PROMISC)
10576 rx_mode = BNX2X_RX_MODE_PROMISC;
10577
10578 else if ((dev->flags & IFF_ALLMULTI) ||
10579 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10580 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10581
10582 else { /* some multicasts */
10583 if (CHIP_IS_E1(bp)) {
10584 int i, old, offset;
10585 struct dev_mc_list *mclist;
10586 struct mac_configuration_cmd *config =
10587 bnx2x_sp(bp, mcast_config);
10588
10589 for (i = 0, mclist = dev->mc_list;
10590 mclist && (i < dev->mc_count);
10591 i++, mclist = mclist->next) {
10592
10593 config->config_table[i].
10594 cam_entry.msb_mac_addr =
10595 swab16(*(u16 *)&mclist->dmi_addr[0]);
10596 config->config_table[i].
10597 cam_entry.middle_mac_addr =
10598 swab16(*(u16 *)&mclist->dmi_addr[2]);
10599 config->config_table[i].
10600 cam_entry.lsb_mac_addr =
10601 swab16(*(u16 *)&mclist->dmi_addr[4]);
10602 config->config_table[i].cam_entry.flags =
10603 cpu_to_le16(port);
10604 config->config_table[i].
10605 target_table_entry.flags = 0;
10606 config->config_table[i].
10607 target_table_entry.client_id = 0;
10608 config->config_table[i].
10609 target_table_entry.vlan_id = 0;
10610
10611 DP(NETIF_MSG_IFUP,
10612 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10613 config->config_table[i].
10614 cam_entry.msb_mac_addr,
10615 config->config_table[i].
10616 cam_entry.middle_mac_addr,
10617 config->config_table[i].
10618 cam_entry.lsb_mac_addr);
10619 }
8d9c5f34 10620 old = config->hdr.length;
34f80b04
EG
10621 if (old > i) {
10622 for (; i < old; i++) {
10623 if (CAM_IS_INVALID(config->
10624 config_table[i])) {
af246401 10625 /* already invalidated */
34f80b04
EG
10626 break;
10627 }
10628 /* invalidate */
10629 CAM_INVALIDATE(config->
10630 config_table[i]);
10631 }
10632 }
10633
10634 if (CHIP_REV_IS_SLOW(bp))
10635 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10636 else
10637 offset = BNX2X_MAX_MULTICAST*(1 + port);
10638
8d9c5f34 10639 config->hdr.length = i;
34f80b04 10640 config->hdr.offset = offset;
8d9c5f34 10641 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10642 config->hdr.reserved1 = 0;
10643
10644 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10645 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10646 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10647 0);
10648 } else { /* E1H */
10649 /* Accept one or more multicasts */
10650 struct dev_mc_list *mclist;
10651 u32 mc_filter[MC_HASH_SIZE];
10652 u32 crc, bit, regidx;
10653 int i;
10654
10655 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10656
10657 for (i = 0, mclist = dev->mc_list;
10658 mclist && (i < dev->mc_count);
10659 i++, mclist = mclist->next) {
10660
7c510e4b
JB
10661 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10662 mclist->dmi_addr);
34f80b04
EG
10663
10664 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10665 bit = (crc >> 24) & 0xff;
10666 regidx = bit >> 5;
10667 bit &= 0x1f;
10668 mc_filter[regidx] |= (1 << bit);
10669 }
10670
10671 for (i = 0; i < MC_HASH_SIZE; i++)
10672 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10673 mc_filter[i]);
10674 }
10675 }
10676
10677 bp->rx_mode = rx_mode;
10678 bnx2x_set_storm_rx_mode(bp);
10679}
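/* The E1H branch above hashes each multicast MAC into a 256-bit table
 * spread over eight 32-bit registers (bit is 0..255, so bit >> 5
 * selects the register and bit & 0x1f the bit within it).  The
 * selection logic, isolated as a sketch:
 */
static inline void example_mc_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* top CRC byte: 1 of 256 bits */

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}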
10680
10681/* called with rtnl_lock */
a2fbb9ea
ET
10682static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10683{
10684 struct sockaddr *addr = p;
10685 struct bnx2x *bp = netdev_priv(dev);
10686
34f80b04 10687 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10688 return -EINVAL;
10689
10690 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10691 if (netif_running(dev)) {
10692 if (CHIP_IS_E1(bp))
3101c2bc 10693 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10694 else
3101c2bc 10695 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10696 }
a2fbb9ea
ET
10697
10698 return 0;
10699}
10700
c18487ee 10701/* called with rtnl_lock */
a2fbb9ea
ET
10702static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10703{
10704 struct mii_ioctl_data *data = if_mii(ifr);
10705 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10706 int port = BP_PORT(bp);
a2fbb9ea
ET
10707 int err;
10708
10709 switch (cmd) {
10710 case SIOCGMIIPHY:
34f80b04 10711 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10712
c14423fe 10713 /* fallthrough */
c18487ee 10714
a2fbb9ea 10715 case SIOCGMIIREG: {
c18487ee 10716 u16 mii_regval;
a2fbb9ea 10717
c18487ee
YR
10718 if (!netif_running(dev))
10719 return -EAGAIN;
a2fbb9ea 10720
34f80b04 10721 mutex_lock(&bp->port.phy_mutex);
3196a88a 10722 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10723 DEFAULT_PHY_DEV_ADDR,
10724 (data->reg_num & 0x1f), &mii_regval);
10725 data->val_out = mii_regval;
34f80b04 10726 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10727 return err;
10728 }
10729
10730 case SIOCSMIIREG:
10731 if (!capable(CAP_NET_ADMIN))
10732 return -EPERM;
10733
c18487ee
YR
10734 if (!netif_running(dev))
10735 return -EAGAIN;
10736
34f80b04 10737 mutex_lock(&bp->port.phy_mutex);
3196a88a 10738 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10739 DEFAULT_PHY_DEV_ADDR,
10740 (data->reg_num & 0x1f), data->val_in);
34f80b04 10741 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10742 return err;
10743
10744 default:
10745 /* do nothing */
10746 break;
10747 }
10748
10749 return -EOPNOTSUPP;
10750}
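/* For reference, the user-space side of the SIOCGMIIPHY/SIOCGMIIREG
 * path above; a sketch in the style of mii-tool (the AF_INET socket
 * and ifname are the caller's, and error handling is minimal).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

/* returns the register value, or -1 on error */
static int example_mii_read(int sock, const char *ifname, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;
	mii->reg_num = reg & 0x1f;
	if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
		return -1;
	return mii->val_out;
}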
10751
34f80b04 10752/* called with rtnl_lock */
a2fbb9ea
ET
10753static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10754{
10755 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10756 int rc = 0;
a2fbb9ea
ET
10757
10758 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10759 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10760 return -EINVAL;
10761
10762 /* This does not race with packet allocation
c14423fe 10763 * because the actual alloc size is
a2fbb9ea
ET
10764 * only updated as part of load
10765 */
10766 dev->mtu = new_mtu;
10767
10768 if (netif_running(dev)) {
34f80b04
EG
10769 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10770 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10771 }
34f80b04
EG
10772
10773 return rc;
a2fbb9ea
ET
10774}
10775
10776static void bnx2x_tx_timeout(struct net_device *dev)
10777{
10778 struct bnx2x *bp = netdev_priv(dev);
10779
10780#ifdef BNX2X_STOP_ON_ERROR
10781 if (!bp->panic)
10782 bnx2x_panic();
10783#endif
 10784 /* This allows the netif to be shut down gracefully before resetting */
10785 schedule_work(&bp->reset_task);
10786}
10787
10788#ifdef BCM_VLAN
34f80b04 10789/* called with rtnl_lock */
a2fbb9ea
ET
10790static void bnx2x_vlan_rx_register(struct net_device *dev,
10791 struct vlan_group *vlgrp)
10792{
10793 struct bnx2x *bp = netdev_priv(dev);
10794
10795 bp->vlgrp = vlgrp;
0c6671b0
EG
10796
10797 /* Set flags according to the required capabilities */
10798 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10799
10800 if (dev->features & NETIF_F_HW_VLAN_TX)
10801 bp->flags |= HW_VLAN_TX_FLAG;
10802
10803 if (dev->features & NETIF_F_HW_VLAN_RX)
10804 bp->flags |= HW_VLAN_RX_FLAG;
10805
a2fbb9ea 10806 if (netif_running(dev))
49d66772 10807 bnx2x_set_client_config(bp);
a2fbb9ea 10808}
34f80b04 10809
a2fbb9ea
ET
10810#endif
10811
10812#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10813static void poll_bnx2x(struct net_device *dev)
10814{
10815 struct bnx2x *bp = netdev_priv(dev);
10816
10817 disable_irq(bp->pdev->irq);
10818 bnx2x_interrupt(bp->pdev->irq, dev);
10819 enable_irq(bp->pdev->irq);
10820}
10821#endif
10822
c64213cd
SH
10823static const struct net_device_ops bnx2x_netdev_ops = {
10824 .ndo_open = bnx2x_open,
10825 .ndo_stop = bnx2x_close,
10826 .ndo_start_xmit = bnx2x_start_xmit,
10827 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10828 .ndo_set_mac_address = bnx2x_change_mac_addr,
10829 .ndo_validate_addr = eth_validate_addr,
10830 .ndo_do_ioctl = bnx2x_ioctl,
10831 .ndo_change_mtu = bnx2x_change_mtu,
10832 .ndo_tx_timeout = bnx2x_tx_timeout,
10833#ifdef BCM_VLAN
10834 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10835#endif
10836#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10837 .ndo_poll_controller = poll_bnx2x,
10838#endif
10839};
10840
10841
34f80b04
EG
10842static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10843 struct net_device *dev)
a2fbb9ea
ET
10844{
10845 struct bnx2x *bp;
10846 int rc;
10847
10848 SET_NETDEV_DEV(dev, &pdev->dev);
10849 bp = netdev_priv(dev);
10850
34f80b04
EG
10851 bp->dev = dev;
10852 bp->pdev = pdev;
a2fbb9ea 10853 bp->flags = 0;
34f80b04 10854 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10855
10856 rc = pci_enable_device(pdev);
10857 if (rc) {
10858 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10859 goto err_out;
10860 }
10861
10862 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10863 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10864 " aborting\n");
10865 rc = -ENODEV;
10866 goto err_out_disable;
10867 }
10868
10869 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10870 printk(KERN_ERR PFX "Cannot find second PCI device"
10871 " base address, aborting\n");
10872 rc = -ENODEV;
10873 goto err_out_disable;
10874 }
10875
34f80b04
EG
10876 if (atomic_read(&pdev->enable_cnt) == 1) {
10877 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10878 if (rc) {
10879 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10880 " aborting\n");
10881 goto err_out_disable;
10882 }
a2fbb9ea 10883
34f80b04
EG
10884 pci_set_master(pdev);
10885 pci_save_state(pdev);
10886 }
a2fbb9ea
ET
10887
10888 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10889 if (bp->pm_cap == 0) {
10890 printk(KERN_ERR PFX "Cannot find power management"
10891 " capability, aborting\n");
10892 rc = -EIO;
10893 goto err_out_release;
10894 }
10895
10896 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10897 if (bp->pcie_cap == 0) {
10898 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10899 " aborting\n");
10900 rc = -EIO;
10901 goto err_out_release;
10902 }
10903
10904 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10905 bp->flags |= USING_DAC_FLAG;
10906 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10907 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10908 " failed, aborting\n");
10909 rc = -EIO;
10910 goto err_out_release;
10911 }
10912
10913 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10914 printk(KERN_ERR PFX "System does not support DMA,"
10915 " aborting\n");
10916 rc = -EIO;
10917 goto err_out_release;
10918 }
10919
34f80b04
EG
10920 dev->mem_start = pci_resource_start(pdev, 0);
10921 dev->base_addr = dev->mem_start;
10922 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10923
10924 dev->irq = pdev->irq;
10925
275f165f 10926 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10927 if (!bp->regview) {
10928 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10929 rc = -ENOMEM;
10930 goto err_out_release;
10931 }
10932
34f80b04
EG
10933 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10934 min_t(u64, BNX2X_DB_SIZE,
10935 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10936 if (!bp->doorbells) {
10937 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10938 rc = -ENOMEM;
10939 goto err_out_unmap;
10940 }
10941
10942 bnx2x_set_power_state(bp, PCI_D0);
10943
34f80b04
EG
10944 /* clean indirect addresses */
10945 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10946 PCICFG_VENDOR_ID_OFFSET);
10947 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10948 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10949 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10950 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10951
34f80b04 10952 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10953
c64213cd 10954 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10955 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10956 dev->features |= NETIF_F_SG;
10957 dev->features |= NETIF_F_HW_CSUM;
10958 if (bp->flags & USING_DAC_FLAG)
10959 dev->features |= NETIF_F_HIGHDMA;
10960#ifdef BCM_VLAN
10961 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10962 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10963#endif
10964 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10965 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10966
10967 return 0;
10968
10969err_out_unmap:
10970 if (bp->regview) {
10971 iounmap(bp->regview);
10972 bp->regview = NULL;
10973 }
a2fbb9ea
ET
10974 if (bp->doorbells) {
10975 iounmap(bp->doorbells);
10976 bp->doorbells = NULL;
10977 }
10978
10979err_out_release:
34f80b04
EG
10980 if (atomic_read(&pdev->enable_cnt) == 1)
10981 pci_release_regions(pdev);
a2fbb9ea
ET
10982
10983err_out_disable:
10984 pci_disable_device(pdev);
10985 pci_set_drvdata(pdev, NULL);
10986
10987err_out:
10988 return rc;
10989}
10990
25047950
ET
10991static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10992{
10993 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10994
10995 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10996 return val;
10997}
10998
 10999/* return value: 1 = 2.5GHz, 2 = 5GHz */
11000static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11001{
11002 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11003
11004 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11005 return val;
11006}
11007
a2fbb9ea
ET
11008static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11009 const struct pci_device_id *ent)
11010{
11011 static int version_printed;
11012 struct net_device *dev = NULL;
11013 struct bnx2x *bp;
25047950 11014 int rc;
a2fbb9ea
ET
11015
11016 if (version_printed++ == 0)
11017 printk(KERN_INFO "%s", version);
11018
11019 /* dev zeroed in init_etherdev */
555f6c78 11020 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11021 if (!dev) {
11022 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11023 return -ENOMEM;
34f80b04 11024 }
a2fbb9ea 11025
a2fbb9ea
ET
11026 bp = netdev_priv(dev);
11027 bp->msglevel = debug;
11028
34f80b04 11029 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
11030 if (rc < 0) {
11031 free_netdev(dev);
11032 return rc;
11033 }
11034
a2fbb9ea
ET
11035 pci_set_drvdata(pdev, dev);
11036
34f80b04 11037 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11038 if (rc)
11039 goto init_one_exit;
11040
11041 rc = register_netdev(dev);
34f80b04 11042 if (rc) {
693fc0d1 11043 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11044 goto init_one_exit;
11045 }
11046
25047950 11047 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11048 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11049 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
11050 bnx2x_get_pcie_width(bp),
11051 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11052 dev->base_addr, bp->pdev->irq);
e174961c 11053 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 11054 return 0;
34f80b04
EG
11055
11056init_one_exit:
11057 if (bp->regview)
11058 iounmap(bp->regview);
11059
11060 if (bp->doorbells)
11061 iounmap(bp->doorbells);
11062
11063 free_netdev(dev);
11064
11065 if (atomic_read(&pdev->enable_cnt) == 1)
11066 pci_release_regions(pdev);
11067
11068 pci_disable_device(pdev);
11069 pci_set_drvdata(pdev, NULL);
11070
11071 return rc;
a2fbb9ea
ET
11072}
11073
11074static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11075{
11076 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11077 struct bnx2x *bp;
11078
11079 if (!dev) {
228241eb
ET
11080 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11081 return;
11082 }
228241eb 11083 bp = netdev_priv(dev);
a2fbb9ea 11084
a2fbb9ea
ET
11085 unregister_netdev(dev);
11086
11087 if (bp->regview)
11088 iounmap(bp->regview);
11089
11090 if (bp->doorbells)
11091 iounmap(bp->doorbells);
11092
11093 free_netdev(dev);
34f80b04
EG
11094
11095 if (atomic_read(&pdev->enable_cnt) == 1)
11096 pci_release_regions(pdev);
11097
a2fbb9ea
ET
11098 pci_disable_device(pdev);
11099 pci_set_drvdata(pdev, NULL);
11100}
11101
11102static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11103{
11104 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11105 struct bnx2x *bp;
11106
34f80b04
EG
11107 if (!dev) {
11108 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11109 return -ENODEV;
11110 }
11111 bp = netdev_priv(dev);
a2fbb9ea 11112
34f80b04 11113 rtnl_lock();
a2fbb9ea 11114
34f80b04 11115 pci_save_state(pdev);
228241eb 11116
34f80b04
EG
11117 if (!netif_running(dev)) {
11118 rtnl_unlock();
11119 return 0;
11120 }
a2fbb9ea
ET
11121
11122 netif_device_detach(dev);
a2fbb9ea 11123
da5a662a 11124 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11125
a2fbb9ea 11126 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11127
34f80b04
EG
11128 rtnl_unlock();
11129
a2fbb9ea
ET
11130 return 0;
11131}
11132
11133static int bnx2x_resume(struct pci_dev *pdev)
11134{
11135 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11136 struct bnx2x *bp;
a2fbb9ea
ET
11137 int rc;
11138
228241eb
ET
11139 if (!dev) {
11140 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11141 return -ENODEV;
11142 }
228241eb 11143 bp = netdev_priv(dev);
a2fbb9ea 11144
34f80b04
EG
11145 rtnl_lock();
11146
228241eb 11147 pci_restore_state(pdev);
34f80b04
EG
11148
11149 if (!netif_running(dev)) {
11150 rtnl_unlock();
11151 return 0;
11152 }
11153
a2fbb9ea
ET
11154 bnx2x_set_power_state(bp, PCI_D0);
11155 netif_device_attach(dev);
11156
da5a662a 11157 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11158
34f80b04
EG
11159 rtnl_unlock();
11160
11161 return rc;
a2fbb9ea
ET
11162}
11163
f8ef6e44
YG
11164static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11165{
11166 int i;
11167
11168 bp->state = BNX2X_STATE_ERROR;
11169
11170 bp->rx_mode = BNX2X_RX_MODE_NONE;
11171
11172 bnx2x_netif_stop(bp, 0);
11173
11174 del_timer_sync(&bp->timer);
11175 bp->stats_state = STATS_STATE_DISABLED;
11176 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11177
11178 /* Release IRQs */
11179 bnx2x_free_irq(bp);
11180
11181 if (CHIP_IS_E1(bp)) {
11182 struct mac_configuration_cmd *config =
11183 bnx2x_sp(bp, mcast_config);
11184
8d9c5f34 11185 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11186 CAM_INVALIDATE(config->config_table[i]);
11187 }
11188
11189 /* Free SKBs, SGEs, TPA pool and driver internals */
11190 bnx2x_free_skbs(bp);
555f6c78 11191 for_each_rx_queue(bp, i)
f8ef6e44 11192 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11193 for_each_rx_queue(bp, i)
7cde1c8b 11194 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11195 bnx2x_free_mem(bp);
11196
11197 bp->state = BNX2X_STATE_CLOSED;
11198
11199 netif_carrier_off(bp->dev);
11200
11201 return 0;
11202}
11203
11204static void bnx2x_eeh_recover(struct bnx2x *bp)
11205{
11206 u32 val;
11207
11208 mutex_init(&bp->port.phy_mutex);
11209
11210 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11211 bp->link_params.shmem_base = bp->common.shmem_base;
11212 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11213
11214 if (!bp->common.shmem_base ||
11215 (bp->common.shmem_base < 0xA0000) ||
11216 (bp->common.shmem_base >= 0xC0000)) {
11217 BNX2X_DEV_INFO("MCP not active\n");
11218 bp->flags |= NO_MCP_FLAG;
11219 return;
11220 }
11221
11222 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11223 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11224 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11225 BNX2X_ERR("BAD MCP validity signature\n");
11226
11227 if (!BP_NOMCP(bp)) {
11228 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11229 & DRV_MSG_SEQ_NUMBER_MASK);
11230 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11231 }
11232}
11233
493adb1f
WX
11234/**
11235 * bnx2x_io_error_detected - called when PCI error is detected
11236 * @pdev: Pointer to PCI device
11237 * @state: The current pci connection state
11238 *
11239 * This function is called after a PCI bus error affecting
11240 * this device has been detected.
11241 */
11242static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11243 pci_channel_state_t state)
11244{
11245 struct net_device *dev = pci_get_drvdata(pdev);
11246 struct bnx2x *bp = netdev_priv(dev);
11247
11248 rtnl_lock();
11249
11250 netif_device_detach(dev);
11251
11252 if (netif_running(dev))
f8ef6e44 11253 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11254
11255 pci_disable_device(pdev);
11256
11257 rtnl_unlock();
11258
11259 /* Request a slot reset */
11260 return PCI_ERS_RESULT_NEED_RESET;
11261}
11262
11263/**
11264 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11265 * @pdev: Pointer to PCI device
11266 *
11267 * Restart the card from scratch, as if from a cold-boot.
11268 */
11269static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11270{
11271 struct net_device *dev = pci_get_drvdata(pdev);
11272 struct bnx2x *bp = netdev_priv(dev);
11273
11274 rtnl_lock();
11275
11276 if (pci_enable_device(pdev)) {
11277 dev_err(&pdev->dev,
11278 "Cannot re-enable PCI device after reset\n");
11279 rtnl_unlock();
11280 return PCI_ERS_RESULT_DISCONNECT;
11281 }
11282
11283 pci_set_master(pdev);
11284 pci_restore_state(pdev);
11285
11286 if (netif_running(dev))
11287 bnx2x_set_power_state(bp, PCI_D0);
11288
11289 rtnl_unlock();
11290
11291 return PCI_ERS_RESULT_RECOVERED;
11292}
11293
11294/**
11295 * bnx2x_io_resume - called when traffic can start flowing again
11296 * @pdev: Pointer to PCI device
11297 *
11298 * This callback is called when the error recovery driver tells us that
 11299 * it's OK to resume normal operation.
11300 */
11301static void bnx2x_io_resume(struct pci_dev *pdev)
11302{
11303 struct net_device *dev = pci_get_drvdata(pdev);
11304 struct bnx2x *bp = netdev_priv(dev);
11305
11306 rtnl_lock();
11307
f8ef6e44
YG
11308 bnx2x_eeh_recover(bp);
11309
493adb1f 11310 if (netif_running(dev))
f8ef6e44 11311 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11312
11313 netif_device_attach(dev);
11314
11315 rtnl_unlock();
11316}
11317
11318static struct pci_error_handlers bnx2x_err_handler = {
11319 .error_detected = bnx2x_io_error_detected,
11320 .slot_reset = bnx2x_io_slot_reset,
11321 .resume = bnx2x_io_resume,
11322};
11323
a2fbb9ea 11324static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11325 .name = DRV_MODULE_NAME,
11326 .id_table = bnx2x_pci_tbl,
11327 .probe = bnx2x_init_one,
11328 .remove = __devexit_p(bnx2x_remove_one),
11329 .suspend = bnx2x_suspend,
11330 .resume = bnx2x_resume,
11331 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11332};
11333
11334static int __init bnx2x_init(void)
11335{
1cf167f2
EG
11336 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11337 if (bnx2x_wq == NULL) {
11338 printk(KERN_ERR PFX "Cannot create workqueue\n");
11339 return -ENOMEM;
11340 }
11341
a2fbb9ea
ET
11342 return pci_register_driver(&bnx2x_pci_driver);
11343}
11344
11345static void __exit bnx2x_cleanup(void)
11346{
11347 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11348
11349 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11350}
11351
11352module_init(bnx2x_init);
11353module_exit(bnx2x_cleanup);
11354