/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105"
#define DRV_MODULE_RELDATE	"2009/03/02"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

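/* A rough sketch of what the two helpers above do: PCICFG_GRC_ADDRESS is a
 * movable window into GRC register space, so an indirect access is "point
 * the window at addr, touch PCICFG_GRC_DATA, then park the window back at
 * the vendor-ID offset" (parking it makes a later stray config-space read
 * harmless):
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	val = bnx2x_reg_rd_ind(bp, addr);
 *
 * This path is only needed before the DMAE engine is ready, and relies on
 * the MCP-coordinated init flow for serialization (see comment above).
 */
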
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

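/* The command memory is an array of struct dmae_command slots indexed by
 * channel, so posting to channel idx is, in effect:
 *
 *	copy the command, one dword at a time, into
 *	    DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command)
 *	write 1 to dmae_reg_go_c[idx] to kick that channel
 *
 * Completion is signalled by the engine writing comp_val to comp_addr,
 * which the two callers below poll for.
 */
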
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

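/* The completion handshake used by both DMAE helpers, boiled down (bounded
 * to roughly 200 iterations by cnt in the real code):
 *
 *	*wb_comp = 0;
 *	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 *	while (*wb_comp != DMAE_COMP_VAL)
 *		udelay(5);	/+ or msleep(100) on emulation/FPGA +/
 *
 * The engine DMAs DMAE_COMP_VAL into the slowpath wb_comp word when the
 * copy finishes, so a plain memory poll suffices; dmae_mutex serializes
 * users of the single init channel.
 */
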
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

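/* Each storm processor keeps a small assert list in its internal memory: a
 * last-index byte plus an array of 16-byte entries (row0..row3 above, low
 * dword read first).  An entry whose first dword still holds
 * COMMON_ASM_INVALID_ASSERT_OPCODE terminates the walk, so rc ends up being
 * the number of valid firmware assert entries found across all four storms.
 */
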
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

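/* Reading of the above, as far as the register usage shows: the MCP
 * scratchpad holds a circular text log, and the dword at offset 0xf104 (the
 * "mark", offset by 0x08000000) points just past the newest byte.  The two
 * loops therefore print the older half (mark..0xF900) first and then the
 * newer half (0xF108..mark), eight dwords at a time, byte-swapped with
 * htonl() so the ASCII comes out in memory order, NUL-terminated via
 * data[8].
 */
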
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

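/* Note on the INTx branch above: the HC is deliberately programmed twice.
 * The first write momentarily enables both the legacy line and the MSI/MSIX
 * path; the second write (after clearing HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0)
 * leaves only the legacy line armed.  On E1H, the leading/trailing edge
 * registers then select which attention bits may fire for this VN, plus the
 * NIG and GPIO3 attentions when this function is the PMF.
 */
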
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

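/* Quiescing order matters here: bump intr_sem first so any ISR that still
 * fires becomes a no-op, optionally mask the HC, then synchronize_irq() on
 * every vector so in-flight handlers drain, and finally flush the slowpath
 * workqueue.  Only after all four steps is it safe to tear down shared
 * state.
 */
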
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

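/* The ack is a single 32-bit doorbell: the status-block index the driver
 * has consumed up to, plus sb-id/storm/update/op packed into the flags
 * half.  As a sketch (the real poll routine lives later in this file), a
 * caller re-enabling interrupts after servicing the ustorm side would look
 * roughly like:
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */
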
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

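/* A transmitted packet occupies a chain of nbd descriptors: the first
 * (mapped) BD, optionally a parse BD and a TSO split-header BD, which carry
 * no DMA mapping and are simply skipped, then one mapped BD per fragment.
 * The walk above unmaps exactly the mapped ones and returns
 * first_bd + nbd as the new consumer.
 */
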
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

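/* Worked example, assuming tx_ring_size = 4096 and NUM_TX_RINGS = 16
 * (illustrative values, not taken from this file): prod = 4100 and
 * cons = 4000 gives used = 100 + 16 = 116, so 4096 - 116 = 3980 BDs may
 * still be posted.  SUB_S16() keeps the distance correct across 16-bit
 * counter wrap, and the NUM_TX_RINGS term reserves the "next-page" BD that
 * ends every descriptor page.
 */
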
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

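/* The wake threshold of MAX_SKB_FRAGS + 3 mirrors the worst case a single
 * start_xmit() can consume: one BD per fragment plus the first BD, a parse
 * BD and a possible TSO split-header BD (see bnx2x_free_tx_pkt() above).
 * Waking with less room would just re-stop the queue on the next packet.
 */
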
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

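/* Ramrods are slowpath commands posted on the SPQ and completed through the
 * Rx completion queue, which is why this handler runs from the Rx path:
 * each completion returns a credit (spq_left++) and advances the coarse
 * state machine that bnx2x_wait_ramrod() polls.  Queue 0 carries the
 * port-wide events; the other fastpath queues only see their own
 * setup/halt transitions.
 */
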
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

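/* The sge_mask is a bitmap with one bit per SGE ring entry, grouped into
 * 64-bit elements: a set bit means the entry has not yet been consumed.
 * Completions clear bits out of order, but the producer may only advance
 * over mask elements that have gone entirely to zero; the loop above
 * re-arms each fully-consumed element (back to all ones, i.e. re-postable)
 * and bumps rx_sge_prod by RX_SGE_MASK_ELEM_SZ per element.
 */
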
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in each page:
	   these correspond to the "next" element, hence will never
	   be indicated and should be removed from the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

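/* TPA (the hardware LRO) aggregates a TCP flow into one of a small set of
 * per-queue "bins": on TPA_START the just-completed buffer is parked in
 * tpa_pool[queue] - still mapped, as the aggregate keeps growing - while a
 * fresh skb from the pool takes its place on the BD ring.  The matching
 * TPA_END is handled by bnx2x_tpa_stop() below, which attaches the SGE
 * pages and hands the aggregate to the stack.
 */
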
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

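/* Ordering above is a two-barrier pattern: wmb() guarantees the descriptor
 * contents are globally visible before the firmware can observe the new
 * producer values, and mmiowb() keeps the MMIO producer writes themselves
 * from being reordered on their way to the device.  All three producers
 * (BD, CQE, SGE) are published together in one ustorm structure.
 */
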
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

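/* Standard NAPI hand-off: the hard IRQ only acks the status block with
 * IGU_INT_DISABLE (masking further events from this SB) and schedules the
 * queue's NAPI context; the poll routine does the actual Rx/Tx work and
 * re-enables the interrupt when done.
 */
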
1696static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697{
555f6c78 1698 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1699 u16 status = bnx2x_ack_int(bp);
34f80b04 1700 u16 mask;
a2fbb9ea 1701
34f80b04 1702 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1703 if (unlikely(status == 0)) {
1704 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1705 return IRQ_NONE;
1706 }
f5372251 1707 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1708
34f80b04 1709 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712 return IRQ_HANDLED;
1713 }
1714
3196a88a
EG
1715#ifdef BNX2X_STOP_ON_ERROR
1716 if (unlikely(bp->panic))
1717 return IRQ_HANDLED;
1718#endif
1719
34f80b04
EG
1720 mask = 0x2 << bp->fp[0].sb_id;
1721 if (status & mask) {
a2fbb9ea
ET
1722 struct bnx2x_fastpath *fp = &bp->fp[0];
1723
1724 prefetch(fp->rx_cons_sb);
1725 prefetch(fp->tx_cons_sb);
1726 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728
288379f0 1729 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1730
34f80b04 1731 status &= ~mask;
a2fbb9ea
ET
1732 }
1733
a2fbb9ea 1734
34f80b04 1735 if (unlikely(status & 0x1)) {
1cf167f2 1736 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1737
1738 status &= ~0x1;
1739 if (!status)
1740 return IRQ_HANDLED;
1741 }
1742
34f80b04
EG
1743 if (status)
1744 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1745 status);
a2fbb9ea 1746
c18487ee 1747 return IRQ_HANDLED;
a2fbb9ea
ET
1748}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
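
/*
 * A minimal userspace sketch of the acquire/poll/release pattern used by
 * bnx2x_acquire_hw_lock()/bnx2x_release_hw_lock() above.  The fake
 * register below stands in for the MISC_REG_DRIVER_CONTROL_* lock
 * register; reg_rd()/reg_wr_*() and the "set then read back" handshake
 * are illustrative assumptions, not driver API.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_lock_reg;			/* models the HW lock register */

static uint32_t reg_rd(void) { return fake_lock_reg; }
static void reg_wr_set(uint32_t bits) { fake_lock_reg |= bits; }   /* "+ 4" set port */
static void reg_wr_clr(uint32_t bits) { fake_lock_reg &= ~bits; }  /* release write */

static int acquire(uint32_t resource)
{
	uint32_t bit = 1u << resource;
	int cnt;

	if (reg_rd() & bit)			/* already taken: -EEXIST */
		return -1;
	for (cnt = 0; cnt < 1000; cnt++) {	/* 5 s budget at 5 ms/try */
		reg_wr_set(bit);		/* request the lock */
		if (reg_rd() & bit)		/* read back: did we get it? */
			return 0;
		/* msleep(5) in the driver; omitted here */
	}
	return -1;				/* timeout: -EAGAIN */
}

int main(void)
{
	if (acquire(3) == 0) {
		puts("lock 3 acquired");
		reg_wr_clr(1u << 3);		/* release, as the driver does */
	}
	return 0;
}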

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
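
/*
 * Worked example of the shift/mask arithmetic shared by the GPIO helpers
 * above: port 1 pins live PORT_SHIFT bits above port 0 pins in
 * MISC_REG_GPIO.  PORT_SHIFT = 4 here is an assumed stand-in for
 * MISC_REGISTERS_GPIO_PORT_SHIFT (one group of four pins per port),
 * chosen for illustration rather than taken from the register file.
 */
#include <stdint.h>
#include <stdio.h>

#define PORT_SHIFT 4	/* assumption, not the driver's constant */

int main(void)
{
	for (int port = 0; port <= 1; port++) {
		for (int gpio = 0; gpio <= 3; gpio++) {
			int shift = gpio + (port ? PORT_SHIFT : 0);
			uint32_t mask = 1u << shift;

			printf("port %d gpio %d -> shift %d mask 0x%02x\n",
			       port, gpio, shift, mask);
		}
	}
	return 0;
}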

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000 usec, for 1G it is 10000 usec */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
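
/*
 * Worked numbers for the port min/max init above at 10G (line_speed =
 * 10000 Mbps).  RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF = 10000000
 * follow from the comments above; QM_ARB_BYTES = 40000 and FAIR_MEM = 2
 * are assumed values for illustration only, not taken from bnx2x.h.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t line_speed = 10000;		/* Mbps */
	uint32_t r_param = line_speed / 8;	/* 1250 bytes/usec */
	uint32_t rs_timeout_usec = 100, t_fair_coef = 10000000;
	uint32_t qm_arb_bytes = 40000, fair_mem = 2;	/* assumptions */

	uint32_t t_fair = t_fair_coef / line_speed;	/* 1000 usec at 10G */

	printf("rs_periodic_timeout (ticks) = %u\n", rs_timeout_usec / 4);
	printf("rs_threshold (bytes)        = %u\n",
	       (rs_timeout_usec * r_param * 5) / 4);	/* 1.25 * 100us * rate */
	printf("fair timer resolution (us)  = %u\n", qm_arb_bytes / r_param);
	printf("t_fair (us)                 = %u\n", t_fair);
	printf("upper_bound (bytes)         = %u\n",
	       r_param * t_fair * fair_mem);
	return 0;
}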

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
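
/*
 * A small sketch of the per-vn quota arithmetic above: vn_max_rate is in
 * Mbps (i.e. bits per microsecond), so rate * period_usec / 8 gives the
 * byte budget for one rate-shaping period.  The 2500 Mbps rate is an
 * invented example share of a 10G port, not configuration read from HW.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vn_max_rate = 2500;	/* Mbps, example vn share */
	uint32_t period_usec = 100;	/* RS_PERIODIC_TIMEOUT_USEC */

	/* bits/usec * usec / 8 = bytes allowed per period */
	uint32_t quota = (vn_max_rate * period_usec) / 8;

	printf("quota = %u bytes per %u usec period\n", quota, period_usec);
	return 0;
}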


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
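
/*
 * A minimal userspace sketch of the producer bookkeeping bnx2x_sp_post()
 * does on the slow-path queue: consume one credit, fill the current BD,
 * advance the producer, and wrap to the start when the last BD is
 * reached.  The 8-entry ring and the spq_entry type are illustrative
 * stand-ins, not driver types; completions returning credits are omitted.
 */
#include <stdio.h>

#define SPQ_SIZE 8

struct spq_entry { unsigned cmd; };

static struct spq_entry spq[SPQ_SIZE];
static struct spq_entry *prod_bd = spq;
static unsigned prod_idx, spq_left = SPQ_SIZE;

static int sp_post(unsigned cmd)
{
	if (!spq_left)				/* ring full: -EBUSY in the driver */
		return -1;

	prod_bd->cmd = cmd;			/* fill the current BD */
	spq_left--;

	if (prod_bd == &spq[SPQ_SIZE - 1]) {	/* last BD: wrap around */
		prod_bd = spq;
		prod_idx = 0;
	} else {
		prod_bd++;
		prod_idx++;
	}
	/* the driver now writes prod_idx to XSTORM_SPQ_PROD_OFFSET */
	return 0;
}

int main(void)
{
	for (unsigned i = 0; i < 10; i++)
		printf("post %u -> %d (prod_idx %u, left %u)\n",
		       i, sp_post(i), prod_idx, spq_left);
	return 0;
}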

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
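
/*
 * Sketch of the change bitmask returned by bnx2x_update_dsb_idx() above:
 * each status-block index that moved sets one bit (attn=1, c=2, u=4,
 * x=8, t=16).  The two index arrays below are made-up sample data.
 */
#include <stdio.h>

int main(void)
{
	unsigned cached[5] = { 10, 7, 7, 3, 0 };	/* driver's copies */
	unsigned hw[5]     = { 11, 7, 8, 3, 0 };	/* chip-written indices */
	unsigned rc = 0;

	for (int i = 0; i < 5; i++)
		if (cached[i] != hw[i]) {
			cached[i] = hw[i];	/* latch the new index */
			rc |= 1u << i;
		}

	printf("changed mask = 0x%x\n", rc);	/* 0x5: attn and u moved */
	return 0;
}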

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
				 external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
			    DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
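
/*
 * Sketch of the edge detection in bnx2x_attn_int() above: a bit is newly
 * asserted when it is set in attn_bits but acknowledged in neither
 * attn_ack nor the driver's attn_state, and newly deasserted in the
 * mirror case.  The three sample words below are invented inputs.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t attn_bits  = 0x05;	/* chip: bits 0 and 2 raised */
	uint32_t attn_ack   = 0x06;	/* bits 1 and 2 already acked */
	uint32_t attn_state = 0x06;	/* driver thinks 1 and 2 are up */

	uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
	uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;

	printf("asserted   = 0x%x\n", asserted);	/* 0x1: bit 0 rose */
	printf("deasserted = 0x%x\n", deasserted);	/* 0x2: bit 1 fell */
	return 0;
}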

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
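
/*
 * Standalone demo of the split 64-bit arithmetic implemented by ADD_64
 * and DIFF_64 above: the statistics blocks keep counters as hi/lo u32
 * pairs, so addition must propagate a carry and subtraction a borrow.
 * The sample values are arbitrary.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t s_hi = 0, s_lo = 0xfffffff0u;	/* sum[hi:lo] */
	uint32_t a_hi = 0, a_lo = 0x20u;	/* add[hi:lo] */

	/* ADD_64: lo wraps, so carry one into hi */
	s_lo += a_lo;
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
	printf("sum  = %08x:%08x\n", s_hi, s_lo);	/* 00000001:00000010 */

	uint32_t m_hi = 1, m_lo = 0x10u;	/* minuend */
	uint32_t t_hi = 0, t_lo = 0x20u;	/* subtrahend */
	uint32_t d_hi, d_lo;

	/* DIFF_64: borrow one from hi on lo underflow, clamp at zero */
	if (m_lo < t_lo) {
		d_hi = m_hi - t_hi;
		if (d_hi > 0) {
			d_hi--;
			d_lo = m_lo + (UINT_MAX - t_lo) + 1;
		} else {
			d_hi = d_lo = 0;	/* m_hi <= t_hi: clamp */
		}
	} else {
		if (m_hi < t_hi) {
			d_hi = d_lo = 0;	/* negative result: clamp */
		} else {
			d_hi = m_hi - t_hi;
			d_lo = m_lo - t_lo;
		}
	}
	printf("diff = %08x:%08x\n", d_hi, d_lo);	/* 00000000:fffffff0 */
	return 0;
}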

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3482
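/* Prepare the single DMAE command that copies the host_func_stats
 * block from host memory to this function's statistics area
 * (func_stx) in chip memory.
 */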
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

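/* Start of a statistics cycle: the PMF re-programs the whole port
 * statistics DMAE chain, a non-PMF function only its own function
 * statistics block; then both the DMAE and storm queries are posted.
 */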
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

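/* Fold the BigMAC counters fetched by DMAE into the port statistics;
 * the UPDATE_STAT64 macros extend the hardware counters into the
 * 64-bit hi/lo pairs kept in mac_stx.
 */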
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

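/* Consume the DMAE results for the active MAC and the NIG block and
 * fold them into the port statistics; fails (and discards the update)
 * if no MAC is currently active.
 */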
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

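/* Merge the per-client statistics reported by the x/t/u storms into
 * the per-queue, per-function and global counters.  Each storm block
 * carries a counter that must match the driver's stats_counter; a
 * stale block on any queue aborts the whole update to keep the
 * snapshot consistent.
 */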
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

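/* Translate the accumulated driver statistics into the generic
 * net_device_stats counters reported to the network stack.
 */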
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

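/* UPDATE event handler: once the DMAE completion value is seen, fold
 * in the hardware and storm statistics, refresh the netdev and driver
 * counters, and post the next statistics queries.
 */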
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

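/* On stats stop, flush the latest port/function statistics from host
 * memory back to chip memory so they survive the statistics shutdown.
 */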
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

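/* Statistics state machine: indexed by the current state and the
 * incoming event, each entry names the action to run and the next
 * state.  bnx2x_stats_handle() below is the single entry point.
 */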
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

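/* Periodic driver timer: optional polling of the first queue (debug
 * "poll" mode), the driver/MCP heartbeat exchange, and a statistics
 * UPDATE event while the device is up.
 */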
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

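/* Bind a host status block to the USTORM/CSTORM for this sb_id,
 * disable all of its host-coalescing indices and enable the IGU
 * interrupt for the status block.
 */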
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

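/* Set up the default status block: read the AEU enable bits for every
 * dynamic attention group, program the attention message address, and
 * bind one section per storm (U/C/T/X) with all HC indices initially
 * disabled.
 */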
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

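/* Program the rx/tx interrupt coalescing timeouts into the storm HC
 * timeout registers (presumably in 12us hardware units, given the
 * division by 12) and disable host coalescing for an index whose
 * timeout is zero.
 */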
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

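/* Bring up all rx rings: pre-allocate the TPA skb pool when TPA is
 * enabled, chain the SGE/BD/CQE "next page" pointers, fill the rings
 * with buffers and publish the initial producers to the chip.  A TPA
 * pool or SGE allocation failure disables TPA on that queue.
 */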
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

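/* Fill in the per-queue ethernet context: rx BD/SGE page addresses
 * and buffer sizes for the USTORM, tx BD page and doorbell data
 * addresses for the XSTORM, and the CSTORM status block binding.
 */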
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

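/* Build the common per-client TSTORM configuration (MTU, statistics,
 * VLAN removal, TPA SGE limits) and write it out for every queue's
 * client id.
 */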
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

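/* Translate the driver rx mode (none/normal/allmulti/promisc) into
 * the TSTORM MAC filter configuration; `mask` selects this function's
 * bit in the per-port drop/accept masks.
 */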
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

471de716 4913static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4914{
a2fbb9ea
ET
4915 struct tstorm_eth_function_common_config tstorm_config = {0};
4916 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4917 int port = BP_PORT(bp);
4918 int func = BP_FUNC(bp);
de832a55
EG
4919 int i, j;
4920 u32 offset;
471de716 4921 u16 max_agg_size;
a2fbb9ea
ET
4922
4923 if (is_multi(bp)) {
555f6c78 4924 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4925 tstorm_config.rss_result_mask = MULTI_MASK;
4926 }
8d9c5f34
EG
4927 if (IS_E1HMF(bp))
4928 tstorm_config.config_flags |=
4929 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4930
34f80b04
EG
4931 tstorm_config.leading_client_id = BP_L_ID(bp);
4932
a2fbb9ea 4933 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4934 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4935 (*(u32 *)&tstorm_config));
4936
c14423fe 4937 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4938 bnx2x_set_storm_rx_mode(bp);
4939
de832a55
EG
4940 for_each_queue(bp, i) {
4941 u8 cl_id = bp->fp[i].cl_id;
4942
4943 /* reset xstorm per client statistics */
4944 offset = BAR_XSTRORM_INTMEM +
4945 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946 for (j = 0;
4947 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4949
4950 /* reset tstorm per client statistics */
4951 offset = BAR_TSTRORM_INTMEM +
4952 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 for (j = 0;
4954 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4956
4957 /* reset ustorm per client statistics */
4958 offset = BAR_USTRORM_INTMEM +
4959 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960 for (j = 0;
4961 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4963 }
4964
4965 /* Init statistics related context */
34f80b04 4966 stats_flags.collect_eth = 1;
a2fbb9ea 4967
66e855f3 4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4969 ((u32 *)&stats_flags)[0]);
66e855f3 4970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4971 ((u32 *)&stats_flags)[1]);
4972
66e855f3 4973 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4974 ((u32 *)&stats_flags)[0]);
66e855f3 4975 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4976 ((u32 *)&stats_flags)[1]);
4977
de832a55
EG
4978 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979 ((u32 *)&stats_flags)[0]);
4980 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981 ((u32 *)&stats_flags)[1]);
4982
66e855f3 4983 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4984 ((u32 *)&stats_flags)[0]);
66e855f3 4985 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4986 ((u32 *)&stats_flags)[1]);
4987
66e855f3
YG
4988 REG_WR(bp, BAR_XSTRORM_INTMEM +
4989 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_XSTRORM_INTMEM +
4992 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4994
4995 REG_WR(bp, BAR_TSTRORM_INTMEM +
4996 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_TSTRORM_INTMEM +
4999 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5001
de832a55
EG
5002 REG_WR(bp, BAR_USTRORM_INTMEM +
5003 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005 REG_WR(bp, BAR_USTRORM_INTMEM +
5006 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
34f80b04
EG
5009 if (CHIP_IS_E1H(bp)) {
5010 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5011 IS_E1HMF(bp));
5012 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5013 IS_E1HMF(bp));
5014 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5015 IS_E1HMF(bp));
5016 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5017 IS_E1HMF(bp));
5018
7a9b2557
VZ
5019 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5020 bp->e1hov);
34f80b04
EG
5021 }
5022
4f40f2cb
EG
 5023 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5024 max_agg_size =
5025 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026 SGE_PAGE_SIZE * PAGES_PER_SGE),
5027 (u32)0xffff);
555f6c78 5028 for_each_rx_queue(bp, i) {
7a9b2557 5029 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5030
5031 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5032 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5033 U64_LO(fp->rx_comp_mapping));
5034 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5035 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5036 U64_HI(fp->rx_comp_mapping));
5037
7a9b2557 5038 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5039 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5040 max_agg_size);
5041 }
8a1c38d1 5042
1c06328c
EG
5043 /* dropless flow control */
5044 if (CHIP_IS_E1H(bp)) {
5045 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5046
5047 rx_pause.bd_thr_low = 250;
5048 rx_pause.cqe_thr_low = 250;
5049 rx_pause.cos = 1;
5050 rx_pause.sge_thr_low = 0;
5051 rx_pause.bd_thr_high = 350;
5052 rx_pause.cqe_thr_high = 350;
5053 rx_pause.sge_thr_high = 0;
5054
5055 for_each_rx_queue(bp, i) {
5056 struct bnx2x_fastpath *fp = &bp->fp[i];
5057
5058 if (!fp->disable_tpa) {
5059 rx_pause.sge_thr_low = 150;
5060 rx_pause.sge_thr_high = 250;
5061 }
5062
5063
5064 offset = BAR_USTRORM_INTMEM +
5065 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5066 fp->cl_id);
5067 for (j = 0;
5068 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5069 j++)
5070 REG_WR(bp, offset + j*4,
5071 ((u32 *)&rx_pause)[j]);
5072 }
5073 }
5074
8a1c38d1
EG
5075 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5076
5077 /* Init rate shaping and fairness contexts */
5078 if (IS_E1HMF(bp)) {
5079 int vn;
5080
 5081 /* During init there is no active link;
 5082 until link is up, set the link rate to 10Gbps */
5083 bp->link_vars.line_speed = SPEED_10000;
5084 bnx2x_init_port_minmax(bp);
5085
5086 bnx2x_calc_vn_weight_sum(bp);
5087
5088 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089 bnx2x_init_vn_minmax(bp, 2*vn + port);
5090
5091 /* Enable rate shaping and fairness */
5092 bp->cmng.flags.cmng_enables =
5093 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094 if (bp->vn_weight_sum)
5095 bp->cmng.flags.cmng_enables |=
5096 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5097 else
 5098 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5099 " fairness will be disabled\n");
5100 } else {
5101 /* rate shaping and fairness are disabled */
5102 DP(NETIF_MSG_IFUP,
5103 "single function mode minmax will be disabled\n");
5104 }
5105
5106
5107 /* Store it to internal memory */
5108 if (bp->port.pmf)
5109 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110 REG_WR(bp, BAR_XSTRORM_INTMEM +
5111 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5113}
5114
471de716
EG
5115static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5116{
5117 switch (load_code) {
5118 case FW_MSG_CODE_DRV_LOAD_COMMON:
5119 bnx2x_init_internal_common(bp);
5120 /* no break */
5121
5122 case FW_MSG_CODE_DRV_LOAD_PORT:
5123 bnx2x_init_internal_port(bp);
5124 /* no break */
5125
5126 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127 bnx2x_init_internal_func(bp);
5128 break;
5129
5130 default:
5131 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5132 break;
5133 }
5134}
5135
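/* [Illustrative sketch, not driver code] The switch above relies on
 * deliberate fall-through: a COMMON load performs the PORT and
 * FUNCTION stages as well, and a PORT load performs FUNCTION too.
 * The bare shape of that cascade (the init_* names are hypothetical
 * stand-ins):
 */
enum load_stage { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

extern void init_common(void);
extern void init_port(void);
extern void init_function(void);

static void init_cascade(enum load_stage stage)
{
	switch (stage) {
	case LOAD_COMMON:
		init_common();
		/* fall through - COMMON implies PORT */
	case LOAD_PORT:
		init_port();
		/* fall through - PORT implies FUNCTION */
	case LOAD_FUNCTION:
		init_function();
		break;
	}
}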
5136static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5137{
5138 int i;
5139
5140 for_each_queue(bp, i) {
5141 struct bnx2x_fastpath *fp = &bp->fp[i];
5142
34f80b04 5143 fp->bp = bp;
a2fbb9ea 5144 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5145 fp->index = i;
34f80b04
EG
5146 fp->cl_id = BP_L_ID(bp) + i;
5147 fp->sb_id = fp->cl_id;
5148 DP(NETIF_MSG_IFUP,
f5372251
EG
5149 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5150 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5151 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5152 fp->sb_id);
5c862848 5153 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5154 }
5155
16119785
EG
5156 /* ensure status block indices were read */
5157 rmb();
5158
5159
5c862848
EG
5160 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5161 DEF_SB_ID);
5162 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5163 bnx2x_update_coalesce(bp);
5164 bnx2x_init_rx_rings(bp);
5165 bnx2x_init_tx_ring(bp);
5166 bnx2x_init_sp_ring(bp);
5167 bnx2x_init_context(bp);
471de716 5168 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5169 bnx2x_init_ind_table(bp);
0ef00459
EG
5170 bnx2x_stats_init(bp);
5171
5172 /* At this point, we are ready for interrupts */
5173 atomic_set(&bp->intr_sem, 0);
5174
5175 /* flush all before enabling interrupts */
5176 mb();
5177 mmiowb();
5178
615f8fd9 5179 bnx2x_int_enable(bp);
a2fbb9ea
ET
5180}
5181
5182/* end of nic init */
5183
5184/*
5185 * gzip service functions
5186 */
5187
5188static int bnx2x_gunzip_init(struct bnx2x *bp)
5189{
5190 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5191 &bp->gunzip_mapping);
5192 if (bp->gunzip_buf == NULL)
5193 goto gunzip_nomem1;
5194
5195 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5196 if (bp->strm == NULL)
5197 goto gunzip_nomem2;
5198
5199 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5200 GFP_KERNEL);
5201 if (bp->strm->workspace == NULL)
5202 goto gunzip_nomem3;
5203
5204 return 0;
5205
5206gunzip_nomem3:
5207 kfree(bp->strm);
5208 bp->strm = NULL;
5209
5210gunzip_nomem2:
5211 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5212 bp->gunzip_mapping);
5213 bp->gunzip_buf = NULL;
5214
5215gunzip_nomem1:
5216 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5217 " decompression\n", bp->dev->name);
a2fbb9ea
ET
5218 return -ENOMEM;
5219}
5220
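/* [Illustrative sketch, not driver code] bnx2x_gunzip_init() uses the
 * classic kernel unwind pattern: allocate in order and, on failure,
 * jump to a label that frees only what was already allocated.
 * A minimal standalone form:
 */
#include <stdlib.h>

struct two_bufs { void *a; void *b; };

static int two_bufs_init(struct two_bufs *t, size_t size)
{
	t->a = malloc(size);
	if (t->a == NULL)
		goto nomem1;

	t->b = malloc(size);
	if (t->b == NULL)
		goto nomem2;

	return 0;

nomem2:
	free(t->a);	/* undo the first allocation */
	t->a = NULL;
nomem1:
	return -1;	/* caller sees a clean failure */
}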
5221static void bnx2x_gunzip_end(struct bnx2x *bp)
5222{
5223 kfree(bp->strm->workspace);
5224
5225 kfree(bp->strm);
5226 bp->strm = NULL;
5227
5228 if (bp->gunzip_buf) {
5229 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5230 bp->gunzip_mapping);
5231 bp->gunzip_buf = NULL;
5232 }
5233}
5234
5235static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5236{
5237 int n, rc;
5238
5239 /* check gzip header */
5240 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5241 return -EINVAL;
5242
5243 n = 10;
5244
34f80b04 5245#define FNAME 0x8
a2fbb9ea
ET
5246
5247 if (zbuf[3] & FNAME)
5248 while ((zbuf[n++] != 0) && (n < len));
5249
5250 bp->strm->next_in = zbuf + n;
5251 bp->strm->avail_in = len - n;
5252 bp->strm->next_out = bp->gunzip_buf;
5253 bp->strm->avail_out = FW_BUF_SIZE;
5254
5255 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5256 if (rc != Z_OK)
5257 return rc;
5258
5259 rc = zlib_inflate(bp->strm, Z_FINISH);
5260 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5261 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5262 bp->dev->name, bp->strm->msg);
5263
5264 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5265 if (bp->gunzip_outlen & 0x3)
5266 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5267 " gunzip_outlen (%d) not aligned\n",
5268 bp->dev->name, bp->gunzip_outlen);
5269 bp->gunzip_outlen >>= 2;
5270
5271 zlib_inflateEnd(bp->strm);
5272
5273 if (rc == Z_STREAM_END)
5274 return 0;
5275
5276 return rc;
5277}
5278
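/* [Illustrative sketch, not driver code] bnx2x_gunzip() above skips a
 * 10-byte fixed gzip header plus a NUL-terminated file name when the
 * FNAME flag (bit 3 of the FLG byte) is set, before handing the raw
 * deflate stream to zlib. Standalone form of that header walk:
 */
#define GZ_FNAME 0x08

static int gzip_payload_offset(const unsigned char *zbuf, int len)
{
	int n = 10;			/* fixed gzip header size */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b)
		return -1;		/* not a gzip stream */

	if (zbuf[3] & GZ_FNAME)		/* skip the original file name */
		while ((n < len) && (zbuf[n++] != 0))
			;

	return n;			/* deflate data starts here */
}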
5279/* nic load/unload */
5280
5281/*
34f80b04 5282 * General service functions
a2fbb9ea
ET
5283 */
5284
5285/* send a NIG loopback debug packet */
5286static void bnx2x_lb_pckt(struct bnx2x *bp)
5287{
a2fbb9ea 5288 u32 wb_write[3];
a2fbb9ea
ET
5289
5290 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5291 wb_write[0] = 0x55555555;
5292 wb_write[1] = 0x55555555;
34f80b04 5293 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5294 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5295
5296 /* NON-IP protocol */
a2fbb9ea
ET
5297 wb_write[0] = 0x09000000;
5298 wb_write[1] = 0x55555555;
34f80b04 5299 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5300 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5301}
5302
5303/* some of the internal memories are not directly
 5304 * readable from the driver;
 5305 * to test them we send debug packets
 5306 */
5307static int bnx2x_int_mem_test(struct bnx2x *bp)
5308{
5309 int factor;
5310 int count, i;
5311 u32 val = 0;
5312
ad8d3948 5313 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5314 factor = 120;
ad8d3948
EG
5315 else if (CHIP_REV_IS_EMUL(bp))
5316 factor = 200;
5317 else
a2fbb9ea 5318 factor = 1;
a2fbb9ea
ET
5319
5320 DP(NETIF_MSG_HW, "start part1\n");
5321
5322 /* Disable inputs of parser neighbor blocks */
5323 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5324 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5325 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5326 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5327
5328 /* Write 0 to parser credits for CFC search request */
5329 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5330
5331 /* send Ethernet packet */
5332 bnx2x_lb_pckt(bp);
5333
 5334 /* TODO: do we need to reset the NIG statistic? */
5335 /* Wait until NIG register shows 1 packet of size 0x10 */
5336 count = 1000 * factor;
5337 while (count) {
34f80b04 5338
a2fbb9ea
ET
5339 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5340 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5341 if (val == 0x10)
5342 break;
5343
5344 msleep(10);
5345 count--;
5346 }
5347 if (val != 0x10) {
5348 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5349 return -1;
5350 }
5351
5352 /* Wait until PRS register shows 1 packet */
5353 count = 1000 * factor;
5354 while (count) {
5355 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5356 if (val == 1)
5357 break;
5358
5359 msleep(10);
5360 count--;
5361 }
5362 if (val != 0x1) {
5363 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5364 return -2;
5365 }
5366
5367 /* Reset and init BRB, PRS */
34f80b04 5368 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5369 msleep(50);
34f80b04 5370 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5371 msleep(50);
5372 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5373 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5374
5375 DP(NETIF_MSG_HW, "part2\n");
5376
5377 /* Disable inputs of parser neighbor blocks */
5378 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5379 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5380 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5381 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5382
5383 /* Write 0 to parser credits for CFC search request */
5384 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5385
5386 /* send 10 Ethernet packets */
5387 for (i = 0; i < 10; i++)
5388 bnx2x_lb_pckt(bp);
5389
 5390 /* Wait until NIG register shows 10 + 1
 5391 packets, 11*0x10 = 0xb0 octets in total */
5392 count = 1000 * factor;
5393 while (count) {
34f80b04 5394
a2fbb9ea
ET
5395 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5396 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5397 if (val == 0xb0)
5398 break;
5399
5400 msleep(10);
5401 count--;
5402 }
5403 if (val != 0xb0) {
5404 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5405 return -3;
5406 }
5407
5408 /* Wait until PRS register shows 2 packets */
5409 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5410 if (val != 2)
5411 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5412
5413 /* Write 1 to parser credits for CFC search request */
5414 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5415
5416 /* Wait until PRS register shows 3 packets */
5417 msleep(10 * factor);
 5418 /* Check that the PRS register now shows 3 packets */
5419 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5420 if (val != 3)
5421 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5422
5423 /* clear NIG EOP FIFO */
5424 for (i = 0; i < 11; i++)
5425 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5426 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5427 if (val != 1) {
5428 BNX2X_ERR("clear of NIG failed\n");
5429 return -4;
5430 }
5431
5432 /* Reset and init BRB, PRS, NIG */
5433 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5434 msleep(50);
5435 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5436 msleep(50);
5437 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5438 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5439#ifndef BCM_ISCSI
5440 /* set NIC mode */
5441 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5442#endif
5443
5444 /* Enable inputs of parser neighbor blocks */
5445 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5446 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5447 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5448 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5449
5450 DP(NETIF_MSG_HW, "done\n");
5451
5452 return 0; /* OK */
5453}
5454
5455static void enable_blocks_attention(struct bnx2x *bp)
5456{
5457 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5458 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5459 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5460 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5461 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5462 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5463 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5464 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5465 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5466/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5467/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5468 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5469 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5470 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5471/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5472/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5473 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5474 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5475 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5476 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5477/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5478/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5479 if (CHIP_REV_IS_FPGA(bp))
5480 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5481 else
5482 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5483 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5484 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5485 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5486/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5487/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5488 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5489 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5490/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5491 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5492}
5493
34f80b04 5494
81f75bbf
EG
5495static void bnx2x_reset_common(struct bnx2x *bp)
5496{
5497 /* reset_common */
5498 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5499 0xd3ffff7f);
5500 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5501}
5502
34f80b04 5503static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5504{
a2fbb9ea 5505 u32 val, i;
a2fbb9ea 5506
34f80b04 5507 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5508
81f75bbf 5509 bnx2x_reset_common(bp);
34f80b04
EG
5510 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5511 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5512
34f80b04
EG
5513 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5514 if (CHIP_IS_E1H(bp))
5515 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5516
34f80b04
EG
5517 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5518 msleep(30);
5519 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5520
34f80b04
EG
5521 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5522 if (CHIP_IS_E1(bp)) {
5523 /* enable HW interrupt from PXP on USDM overflow
5524 bit 16 on INT_MASK_0 */
5525 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5526 }
a2fbb9ea 5527
34f80b04
EG
5528 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5529 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5530
5531#ifdef __BIG_ENDIAN
34f80b04
EG
5532 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5533 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5534 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5535 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5536 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5537 /* make sure this value is 0 */
5538 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5539
5540/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5541 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5542 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5544 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5545#endif
5546
34f80b04 5547 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5548#ifdef BCM_ISCSI
34f80b04
EG
5549 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5550 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5551 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5552#endif
5553
34f80b04
EG
5554 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5555 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5556
34f80b04
EG
 5557 /* let the HW do its magic ... */
5558 msleep(100);
5559 /* finish PXP init */
5560 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5561 if (val != 1) {
5562 BNX2X_ERR("PXP2 CFG failed\n");
5563 return -EBUSY;
5564 }
5565 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5566 if (val != 1) {
5567 BNX2X_ERR("PXP2 RD_INIT failed\n");
5568 return -EBUSY;
5569 }
a2fbb9ea 5570
34f80b04
EG
5571 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5572 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5573
34f80b04 5574 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5575
34f80b04
EG
5576 /* clean the DMAE memory */
5577 bp->dmae_ready = 1;
5578 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5579
34f80b04
EG
5580 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5581 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5582 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5583 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5584
34f80b04
EG
5585 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5587 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5588 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5589
5590 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5591 /* soft reset pulse */
5592 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5593 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5594
5595#ifdef BCM_ISCSI
34f80b04 5596 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5597#endif
a2fbb9ea 5598
34f80b04
EG
5599 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5600 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5601 if (!CHIP_REV_IS_SLOW(bp)) {
5602 /* enable hw interrupt from doorbell Q */
5603 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5604 }
a2fbb9ea 5605
34f80b04 5606 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5607 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5608 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5609 /* set NIC mode */
5610 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5611 if (CHIP_IS_E1H(bp))
5612 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5613
34f80b04
EG
5614 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5615 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5616 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5617 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5618
490c3c9b
EG
5619 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5622 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5623
34f80b04
EG
5624 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5625 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5626 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5627 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5628
34f80b04
EG
5629 /* sync semi rtc */
5630 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5631 0x80000000);
5632 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5633 0x80000000);
a2fbb9ea 5634
34f80b04
EG
5635 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5636 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5637 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5638
34f80b04
EG
5639 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5640 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5641 REG_WR(bp, i, 0xc0cac01a);
5642 /* TODO: replace with something meaningful */
5643 }
8d9c5f34 5644 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5645 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5646
34f80b04
EG
5647 if (sizeof(union cdu_context) != 1024)
5648 /* we currently assume that a context is 1024 bytes */
5649 printk(KERN_ALERT PFX "please adjust the size of"
5650 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5651
34f80b04
EG
5652 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5653 val = (4 << 24) + (0 << 12) + 1024;
5654 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5655 if (CHIP_IS_E1(bp)) {
 5656 /* !!! fix pxp client credit until excel update */
5657 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5658 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5659 }
a2fbb9ea 5660
34f80b04
EG
5661 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5662 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5663 /* enable context validation interrupt from CFC */
5664 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5665
5666 /* set the thresholds to prevent CFC/CDU race */
5667 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5668
34f80b04
EG
5669 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5670 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5671
34f80b04
EG
5672 /* PXPCS COMMON comes here */
5673 /* Reset PCIE errors for debug */
5674 REG_WR(bp, 0x2814, 0xffffffff);
5675 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5676
34f80b04
EG
5677 /* EMAC0 COMMON comes here */
5678 /* EMAC1 COMMON comes here */
5679 /* DBU COMMON comes here */
5680 /* DBG COMMON comes here */
5681
5682 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5683 if (CHIP_IS_E1H(bp)) {
5684 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5685 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5686 }
5687
5688 if (CHIP_REV_IS_SLOW(bp))
5689 msleep(200);
5690
5691 /* finish CFC init */
5692 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5693 if (val != 1) {
5694 BNX2X_ERR("CFC LL_INIT failed\n");
5695 return -EBUSY;
5696 }
5697 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5698 if (val != 1) {
5699 BNX2X_ERR("CFC AC_INIT failed\n");
5700 return -EBUSY;
5701 }
5702 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5703 if (val != 1) {
5704 BNX2X_ERR("CFC CAM_INIT failed\n");
5705 return -EBUSY;
5706 }
5707 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5708
34f80b04
EG
5709 /* read NIG statistic
 5710 to see if this is our first load since power-up */
5711 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5712 val = *bnx2x_sp(bp, wb_data[0]);
5713
5714 /* do internal memory self test */
5715 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5716 BNX2X_ERR("internal mem self test failed\n");
5717 return -EBUSY;
5718 }
5719
35b19ba5 5720 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5723 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5724 bp->port.need_hw_lock = 1;
5725 break;
5726
35b19ba5 5727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
34f80b04
EG
5728 /* Fan failure is indicated by SPIO 5 */
5729 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5730 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5731
5732 /* set to active low mode */
5733 val = REG_RD(bp, MISC_REG_SPIO_INT);
5734 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5735 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5736 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5737
34f80b04
EG
5738 /* enable interrupt to signal the IGU */
5739 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5740 val |= (1 << MISC_REGISTERS_SPIO_5);
5741 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5742 break;
f1410647 5743
34f80b04
EG
5744 default:
5745 break;
5746 }
f1410647 5747
34f80b04
EG
5748 /* clear PXP2 attentions */
5749 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5750
34f80b04 5751 enable_blocks_attention(bp);
a2fbb9ea 5752
6bbca910
YR
5753 if (!BP_NOMCP(bp)) {
5754 bnx2x_acquire_phy_lock(bp);
5755 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5756 bnx2x_release_phy_lock(bp);
5757 } else
5758 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5759
34f80b04
EG
5760 return 0;
5761}
a2fbb9ea 5762
34f80b04
EG
5763static int bnx2x_init_port(struct bnx2x *bp)
5764{
5765 int port = BP_PORT(bp);
1c06328c 5766 u32 low, high;
34f80b04 5767 u32 val;
a2fbb9ea 5768
34f80b04
EG
5769 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5770
5771 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5772
5773 /* Port PXP comes here */
5774 /* Port PXP2 comes here */
a2fbb9ea
ET
5775#ifdef BCM_ISCSI
5776 /* Port0 1
5777 * Port1 385 */
5778 i++;
5779 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5780 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5781 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5782 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5783
5784 /* Port0 2
5785 * Port1 386 */
5786 i++;
5787 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5788 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5789 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5790 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5791
5792 /* Port0 3
5793 * Port1 387 */
5794 i++;
5795 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5796 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5797 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5798 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5799#endif
34f80b04 5800 /* Port CMs come here */
8d9c5f34
EG
5801 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5802 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5803
5804 /* Port QM comes here */
a2fbb9ea
ET
5805#ifdef BCM_ISCSI
5806 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5807 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5808
5809 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5810 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5811#endif
5812 /* Port DQ comes here */
1c06328c
EG
5813
5814 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5815 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5816 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5817 /* no pause for emulation and FPGA */
5818 low = 0;
5819 high = 513;
5820 } else {
5821 if (IS_E1HMF(bp))
5822 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5823 else if (bp->dev->mtu > 4096) {
5824 if (bp->flags & ONE_PORT_FLAG)
5825 low = 160;
5826 else {
5827 val = bp->dev->mtu;
5828 /* (24*1024 + val*4)/256 */
5829 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5830 }
5831 } else
5832 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5833 high = low + 56; /* 14*1024/256 */
5834 }
5835 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5836 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5837
5838
ad8d3948 5839 /* Port PRS comes here */
a2fbb9ea
ET
5840 /* Port TSDM comes here */
5841 /* Port CSDM comes here */
5842 /* Port USDM comes here */
5843 /* Port XSDM comes here */
356e2385 5844
34f80b04
EG
5845 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5846 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5847 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5848 port ? USEM_PORT1_END : USEM_PORT0_END);
5849 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5850 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5851 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5852 port ? XSEM_PORT1_END : XSEM_PORT0_END);
356e2385 5853
a2fbb9ea 5854 /* Port UPB comes here */
34f80b04
EG
5855 /* Port XPB comes here */
5856
5857 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5858 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5859
5860 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5861 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5862
5863 /* update threshold */
34f80b04 5864 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5865 /* update init credit */
34f80b04 5866 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5867
5868 /* probe changes */
34f80b04 5869 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5870 msleep(5);
34f80b04 5871 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5872
5873#ifdef BCM_ISCSI
5874 /* tell the searcher where the T2 table is */
5875 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5876
5877 wb_write[0] = U64_LO(bp->t2_mapping);
5878 wb_write[1] = U64_HI(bp->t2_mapping);
5879 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5880 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5881 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5882 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5883
5884 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5885 /* Port SRCH comes here */
5886#endif
5887 /* Port CDU comes here */
5888 /* Port CFC comes here */
34f80b04
EG
5889
5890 if (CHIP_IS_E1(bp)) {
5891 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5892 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5893 }
5894 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5895 port ? HC_PORT1_END : HC_PORT0_END);
5896
5897 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5898 MISC_AEU_PORT0_START,
34f80b04
EG
5899 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5900 /* init aeu_mask_attn_func_0/1:
5901 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5902 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5903 * bits 4-7 are used for "per vn group attention" */
5904 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5905 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5906
a2fbb9ea
ET
5907 /* Port PXPCS comes here */
5908 /* Port EMAC0 comes here */
5909 /* Port EMAC1 comes here */
5910 /* Port DBU comes here */
5911 /* Port DBG comes here */
356e2385 5912
34f80b04
EG
5913 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5914 port ? NIG_PORT1_END : NIG_PORT0_END);
5915
5916 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5917
5918 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5919 /* 0x2 disable e1hov, 0x1 enable */
5920 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5921 (IS_E1HMF(bp) ? 0x1 : 0x2));
5922
1c06328c
EG
5923 /* support pause requests from USDM, TSDM and BRB */
5924 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5925
5926 {
5927 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5928 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5929 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5930 }
34f80b04
EG
5931 }
5932
a2fbb9ea
ET
5933 /* Port MCP comes here */
5934 /* Port DMAE comes here */
5935
35b19ba5 5936 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
5937 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5938 {
5939 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5940
5941 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5942 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5943
5944 /* The GPIO should be swapped if the swap register is
5945 set and active */
5946 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5947 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5948
5949 /* Select function upon port-swap configuration */
5950 if (port == 0) {
5951 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5952 aeu_gpio_mask = (swap_val && swap_override) ?
5953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5954 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5955 } else {
5956 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5957 aeu_gpio_mask = (swap_val && swap_override) ?
5958 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5959 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5960 }
5961 val = REG_RD(bp, offset);
5962 /* add GPIO3 to group */
5963 val |= aeu_gpio_mask;
5964 REG_WR(bp, offset, val);
5965 }
5966 break;
5967
35b19ba5 5968 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
5969 /* add SPIO 5 to group 0 */
5970 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5971 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5972 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5973 break;
5974
5975 default:
5976 break;
5977 }
5978
c18487ee 5979 bnx2x__link_reset(bp);
a2fbb9ea 5980
34f80b04
EG
5981 return 0;
5982}
5983
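/* [Illustrative sketch, not driver code] The BRB low-threshold comment
 * in bnx2x_init_port() above, (24*1024 + val*4)/256, reduces to
 * 96 + val/64 rounded up, which is exactly what the code computes.
 * A quick standalone check:
 */
#include <stdio.h>

int main(void)
{
	unsigned int val = 9000;	/* example jumbo MTU */
	unsigned int direct = (24 * 1024 + val * 4) / 256;
	unsigned int coded = 96 + (val / 64) + ((val % 64) ? 1 : 0);

	/* direct division truncates; the coded form rounds up */
	printf("%u %u\n", direct, coded);	/* prints: 236 237 */
	return 0;
}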
5984#define ILT_PER_FUNC (768/2)
5985#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
 5986/* the phys address is shifted right by 12 bits and a
 5987 valid bit is added as the 53rd bit;
 5988 then, since this is a wide register(TM),
 5989 we split it into two 32-bit writes
 5990 */
5991#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5992#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5993#define PXP_ONE_ILT(x) (((x) << 10) | x)
5994#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5995
5996#define CNIC_ILT_LINES 0
5997
5998static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5999{
6000 int reg;
6001
6002 if (CHIP_IS_E1H(bp))
6003 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6004 else /* E1 */
6005 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6006
6007 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6008}
6009
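/* [Illustrative sketch, not driver code] The ONCHIP_ADDR macros pack a
 * page-shifted 52-bit address plus the valid bit into two 32-bit
 * register writes. A standalone check of the bit layout:
 */
#include <stdio.h>
#include <stdint.h>

#define ADDR1(x) ((uint32_t)(((uint64_t)(x) >> 12) & 0xFFFFFFFF))
#define ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)(x) >> 44)))

int main(void)
{
	uint64_t addr = 0x0000123456789000ULL;	/* 4K-aligned DMA address */

	/* low word: addr bits 12..43; high word: bits 44..63 + valid */
	printf("lo=0x%08x hi=0x%08x\n", ADDR1(addr), ADDR2(addr));
	/* prints: lo=0x23456789 hi=0x00100001 */
	return 0;
}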
6010static int bnx2x_init_func(struct bnx2x *bp)
6011{
6012 int port = BP_PORT(bp);
6013 int func = BP_FUNC(bp);
8badd27a 6014 u32 addr, val;
34f80b04
EG
6015 int i;
6016
6017 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6018
8badd27a
EG
6019 /* set MSI reconfigure capability */
6020 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6021 val = REG_RD(bp, addr);
6022 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6023 REG_WR(bp, addr, val);
6024
34f80b04
EG
6025 i = FUNC_ILT_BASE(func);
6026
6027 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6028 if (CHIP_IS_E1H(bp)) {
6029 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6030 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6031 } else /* E1 */
6032 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6033 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6034
6035
6036 if (CHIP_IS_E1H(bp)) {
6037 for (i = 0; i < 9; i++)
6038 bnx2x_init_block(bp,
6039 cm_start[func][i], cm_end[func][i]);
6040
6041 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6042 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6043 }
6044
6045 /* HC init per function */
6046 if (CHIP_IS_E1H(bp)) {
6047 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6048
6049 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6050 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6051 }
6052 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6053
c14423fe 6054 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6055 REG_WR(bp, 0x2114, 0xffffffff);
6056 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6057
34f80b04
EG
6058 return 0;
6059}
6060
6061static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6062{
6063 int i, rc = 0;
a2fbb9ea 6064
34f80b04
EG
6065 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6066 BP_FUNC(bp), load_code);
a2fbb9ea 6067
34f80b04
EG
6068 bp->dmae_ready = 0;
6069 mutex_init(&bp->dmae_mutex);
6070 bnx2x_gunzip_init(bp);
a2fbb9ea 6071
34f80b04
EG
6072 switch (load_code) {
6073 case FW_MSG_CODE_DRV_LOAD_COMMON:
6074 rc = bnx2x_init_common(bp);
6075 if (rc)
6076 goto init_hw_err;
6077 /* no break */
6078
6079 case FW_MSG_CODE_DRV_LOAD_PORT:
6080 bp->dmae_ready = 1;
6081 rc = bnx2x_init_port(bp);
6082 if (rc)
6083 goto init_hw_err;
6084 /* no break */
6085
6086 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6087 bp->dmae_ready = 1;
6088 rc = bnx2x_init_func(bp);
6089 if (rc)
6090 goto init_hw_err;
6091 break;
6092
6093 default:
6094 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6095 break;
6096 }
6097
6098 if (!BP_NOMCP(bp)) {
6099 int func = BP_FUNC(bp);
a2fbb9ea
ET
6100
6101 bp->fw_drv_pulse_wr_seq =
34f80b04 6102 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6103 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6104 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6105 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6106 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6107 } else
6108 bp->func_stx = 0;
a2fbb9ea 6109
34f80b04
EG
6110 /* this needs to be done before gunzip end */
6111 bnx2x_zero_def_sb(bp);
6112 for_each_queue(bp, i)
6113 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6114
6115init_hw_err:
6116 bnx2x_gunzip_end(bp);
6117
6118 return rc;
a2fbb9ea
ET
6119}
6120
c14423fe 6121/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6122static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6123{
34f80b04 6124 int func = BP_FUNC(bp);
f1410647
ET
6125 u32 seq = ++bp->fw_seq;
6126 u32 rc = 0;
19680c48
EG
6127 u32 cnt = 1;
6128 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6129
34f80b04 6130 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6131 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6132
19680c48
EG
6133 do {
 6134 /* let the FW do its magic ... */
6135 msleep(delay);
a2fbb9ea 6136
19680c48 6137 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6138
19680c48
EG
 6139 /* Give the FW up to 2 seconds (200*10ms) */
6140 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6141
6142 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6143 cnt*delay, rc, seq);
a2fbb9ea
ET
6144
6145 /* is this a reply to our command? */
6146 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6147 rc &= FW_MSG_CODE_MASK;
f1410647 6148
a2fbb9ea
ET
6149 } else {
6150 /* FW BUG! */
6151 BNX2X_ERR("FW failed to respond!\n");
6152 bnx2x_fw_dump(bp);
6153 rc = 0;
6154 }
f1410647 6155
a2fbb9ea
ET
6156 return rc;
6157}
6158
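/* [Illustrative sketch, not driver code] The mailbox handshake above
 * is: stamp the request with a sequence number, then poll the reply
 * register until the echoed sequence matches or the retry budget runs
 * out. Skeleton with hypothetical stand-ins for the SHMEM access:
 */
#include <stdint.h>

#define SEQ_MASK  0x0000ffffu	/* assumed layout: seq in the low bits */
#define CODE_MASK 0xffff0000u	/* response code in the high bits */

extern uint32_t read_fw_mb(void);	/* assumed: reads fw_mb_header */
extern void sleep_ms(unsigned int ms);	/* assumed: msleep() stand-in */

static uint32_t fw_command_poll(uint32_t seq, int max_tries)
{
	uint32_t rc;
	int cnt = 0;

	do {
		sleep_ms(10);		/* let the FW work */
		rc = read_fw_mb();
	} while ((seq != (rc & SEQ_MASK)) && (++cnt < max_tries));

	/* only a reply carrying our sequence number is valid */
	return (seq == (rc & SEQ_MASK)) ? (rc & CODE_MASK) : 0;
}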
6159static void bnx2x_free_mem(struct bnx2x *bp)
6160{
6161
6162#define BNX2X_PCI_FREE(x, y, size) \
6163 do { \
6164 if (x) { \
6165 pci_free_consistent(bp->pdev, size, x, y); \
6166 x = NULL; \
6167 y = 0; \
6168 } \
6169 } while (0)
6170
6171#define BNX2X_FREE(x) \
6172 do { \
6173 if (x) { \
6174 vfree(x); \
6175 x = NULL; \
6176 } \
6177 } while (0)
6178
6179 int i;
6180
6181 /* fastpath */
555f6c78 6182 /* Common */
a2fbb9ea
ET
6183 for_each_queue(bp, i) {
6184
555f6c78 6185 /* status blocks */
a2fbb9ea
ET
6186 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6187 bnx2x_fp(bp, i, status_blk_mapping),
6188 sizeof(struct host_status_block) +
6189 sizeof(struct eth_tx_db_data));
555f6c78
EG
6190 }
6191 /* Rx */
6192 for_each_rx_queue(bp, i) {
a2fbb9ea 6193
555f6c78 6194 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6195 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6196 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6197 bnx2x_fp(bp, i, rx_desc_mapping),
6198 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6199
6200 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6201 bnx2x_fp(bp, i, rx_comp_mapping),
6202 sizeof(struct eth_fast_path_rx_cqe) *
6203 NUM_RCQ_BD);
a2fbb9ea 6204
7a9b2557 6205 /* SGE ring */
32626230 6206 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6207 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6208 bnx2x_fp(bp, i, rx_sge_mapping),
6209 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6210 }
555f6c78
EG
6211 /* Tx */
6212 for_each_tx_queue(bp, i) {
6213
6214 /* fastpath tx rings: tx_buf tx_desc */
6215 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6216 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6217 bnx2x_fp(bp, i, tx_desc_mapping),
6218 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6219 }
a2fbb9ea
ET
6220 /* end of fastpath */
6221
6222 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6223 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6224
6225 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6226 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6227
6228#ifdef BCM_ISCSI
6229 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6230 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6231 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6232 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6233#endif
7a9b2557 6234 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6235
6236#undef BNX2X_PCI_FREE
 6237#undef BNX2X_FREE
6238}
6239
6240static int bnx2x_alloc_mem(struct bnx2x *bp)
6241{
6242
6243#define BNX2X_PCI_ALLOC(x, y, size) \
6244 do { \
6245 x = pci_alloc_consistent(bp->pdev, size, y); \
6246 if (x == NULL) \
6247 goto alloc_mem_err; \
6248 memset(x, 0, size); \
6249 } while (0)
6250
6251#define BNX2X_ALLOC(x, size) \
6252 do { \
6253 x = vmalloc(size); \
6254 if (x == NULL) \
6255 goto alloc_mem_err; \
6256 memset(x, 0, size); \
6257 } while (0)
6258
6259 int i;
6260
6261 /* fastpath */
555f6c78 6262 /* Common */
a2fbb9ea
ET
6263 for_each_queue(bp, i) {
6264 bnx2x_fp(bp, i, bp) = bp;
6265
555f6c78 6266 /* status blocks */
a2fbb9ea
ET
6267 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6268 &bnx2x_fp(bp, i, status_blk_mapping),
6269 sizeof(struct host_status_block) +
6270 sizeof(struct eth_tx_db_data));
555f6c78
EG
6271 }
6272 /* Rx */
6273 for_each_rx_queue(bp, i) {
a2fbb9ea 6274
555f6c78 6275 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6276 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6277 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6278 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6279 &bnx2x_fp(bp, i, rx_desc_mapping),
6280 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6281
6282 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6283 &bnx2x_fp(bp, i, rx_comp_mapping),
6284 sizeof(struct eth_fast_path_rx_cqe) *
6285 NUM_RCQ_BD);
6286
7a9b2557
VZ
6287 /* SGE ring */
6288 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6289 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6291 &bnx2x_fp(bp, i, rx_sge_mapping),
6292 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6293 }
555f6c78
EG
6294 /* Tx */
6295 for_each_tx_queue(bp, i) {
6296
6297 bnx2x_fp(bp, i, hw_tx_prods) =
6298 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6299
6300 bnx2x_fp(bp, i, tx_prods_mapping) =
6301 bnx2x_fp(bp, i, status_blk_mapping) +
6302 sizeof(struct host_status_block);
6303
6304 /* fastpath tx rings: tx_buf tx_desc */
6305 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6306 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6307 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6308 &bnx2x_fp(bp, i, tx_desc_mapping),
6309 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6310 }
a2fbb9ea
ET
6311 /* end of fastpath */
6312
6313 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6314 sizeof(struct host_def_status_block));
6315
6316 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6317 sizeof(struct bnx2x_slowpath));
6318
6319#ifdef BCM_ISCSI
6320 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6321
6322 /* Initialize T1 */
6323 for (i = 0; i < 64*1024; i += 64) {
6324 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6325 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6326 }
6327
 6328 /* allocate the searcher T2 table;
 6329 we allocate 1/4 of the alloc num for T2
 6330 (which is not entered into the ILT) */
6331 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6332
6333 /* Initialize T2 */
6334 for (i = 0; i < 16*1024; i += 64)
6335 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6336
c14423fe 6337 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6338 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6339
6340 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6341 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6342
6343 /* QM queues (128*MAX_CONN) */
6344 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6345#endif
6346
6347 /* Slow path ring */
6348 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6349
6350 return 0;
6351
6352alloc_mem_err:
6353 bnx2x_free_mem(bp);
6354 return -ENOMEM;
6355
6356#undef BNX2X_PCI_ALLOC
6357#undef BNX2X_ALLOC
6358}
6359
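/* [Illustrative sketch, not driver code] BNX2X_PCI_ALLOC()/BNX2X_ALLOC()
 * are wrapped in do { ... } while (0) so each expands to exactly one
 * statement and composes safely with the trailing semicolon and
 * unbraced control flow. A standalone demonstration:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALLOC_OR_FAIL(x, size)			\
	do {					\
		(x) = malloc(size);		\
		if ((x) == NULL)		\
			goto alloc_err;		\
		memset((x), 0, size);		\
	} while (0)

int main(void)
{
	char *a = NULL, *b = NULL;

	ALLOC_OR_FAIL(a, 64);
	ALLOC_OR_FAIL(b, 64);
	puts("ok");
	free(a);
	free(b);
	return 0;

alloc_err:
	free(a);	/* free(NULL) is a no-op, so unwind is simple */
	free(b);
	return 1;
}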
6360static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6361{
6362 int i;
6363
555f6c78 6364 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6365 struct bnx2x_fastpath *fp = &bp->fp[i];
6366
6367 u16 bd_cons = fp->tx_bd_cons;
6368 u16 sw_prod = fp->tx_pkt_prod;
6369 u16 sw_cons = fp->tx_pkt_cons;
6370
a2fbb9ea
ET
6371 while (sw_cons != sw_prod) {
6372 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6373 sw_cons++;
6374 }
6375 }
6376}
6377
6378static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6379{
6380 int i, j;
6381
555f6c78 6382 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6383 struct bnx2x_fastpath *fp = &bp->fp[j];
6384
a2fbb9ea
ET
6385 for (i = 0; i < NUM_RX_BD; i++) {
6386 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6387 struct sk_buff *skb = rx_buf->skb;
6388
6389 if (skb == NULL)
6390 continue;
6391
6392 pci_unmap_single(bp->pdev,
6393 pci_unmap_addr(rx_buf, mapping),
356e2385 6394 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6395
6396 rx_buf->skb = NULL;
6397 dev_kfree_skb(skb);
6398 }
7a9b2557 6399 if (!fp->disable_tpa)
32626230
EG
6400 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6401 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6402 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6403 }
6404}
6405
6406static void bnx2x_free_skbs(struct bnx2x *bp)
6407{
6408 bnx2x_free_tx_skbs(bp);
6409 bnx2x_free_rx_skbs(bp);
6410}
6411
6412static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6413{
34f80b04 6414 int i, offset = 1;
a2fbb9ea
ET
6415
6416 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6417 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6418 bp->msix_table[0].vector);
6419
6420 for_each_queue(bp, i) {
c14423fe 6421 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6422 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6423 bnx2x_fp(bp, i, state));
6424
34f80b04 6425 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6426 }
a2fbb9ea
ET
6427}
6428
6429static void bnx2x_free_irq(struct bnx2x *bp)
6430{
a2fbb9ea 6431 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6432 bnx2x_free_msix_irqs(bp);
6433 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6434 bp->flags &= ~USING_MSIX_FLAG;
6435
8badd27a
EG
6436 } else if (bp->flags & USING_MSI_FLAG) {
6437 free_irq(bp->pdev->irq, bp->dev);
6438 pci_disable_msi(bp->pdev);
6439 bp->flags &= ~USING_MSI_FLAG;
6440
a2fbb9ea
ET
6441 } else
6442 free_irq(bp->pdev->irq, bp->dev);
6443}
6444
6445static int bnx2x_enable_msix(struct bnx2x *bp)
6446{
8badd27a
EG
6447 int i, rc, offset = 1;
6448 int igu_vec = 0;
a2fbb9ea 6449
8badd27a
EG
6450 bp->msix_table[0].entry = igu_vec;
6451 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6452
34f80b04 6453 for_each_queue(bp, i) {
8badd27a 6454 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6455 bp->msix_table[i + offset].entry = igu_vec;
6456 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6457 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6458 }
6459
34f80b04 6460 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6461 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6462 if (rc) {
8badd27a
EG
6463 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6464 return rc;
34f80b04 6465 }
8badd27a 6466
a2fbb9ea
ET
6467 bp->flags |= USING_MSIX_FLAG;
6468
6469 return 0;
a2fbb9ea
ET
6470}
6471
a2fbb9ea
ET
6472static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6473{
34f80b04 6474 int i, rc, offset = 1;
a2fbb9ea 6475
a2fbb9ea
ET
6476 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6477 bp->dev->name, bp->dev);
a2fbb9ea
ET
6478 if (rc) {
6479 BNX2X_ERR("request sp irq failed\n");
6480 return -EBUSY;
6481 }
6482
6483 for_each_queue(bp, i) {
555f6c78
EG
6484 struct bnx2x_fastpath *fp = &bp->fp[i];
6485
6486 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6487 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6488 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6489 if (rc) {
555f6c78 6490 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6491 bnx2x_free_msix_irqs(bp);
6492 return -EBUSY;
6493 }
6494
555f6c78 6495 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6496 }
6497
555f6c78
EG
6498 i = BNX2X_NUM_QUEUES(bp);
6499 if (is_multi(bp))
6500 printk(KERN_INFO PFX
6501 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6502 bp->dev->name, bp->msix_table[0].vector,
6503 bp->msix_table[offset].vector,
6504 bp->msix_table[offset + i - 1].vector);
6505 else
6506 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6507 bp->dev->name, bp->msix_table[0].vector,
6508 bp->msix_table[offset + i - 1].vector);
6509
a2fbb9ea 6510 return 0;
a2fbb9ea
ET
6511}
6512
8badd27a
EG
6513static int bnx2x_enable_msi(struct bnx2x *bp)
6514{
6515 int rc;
6516
6517 rc = pci_enable_msi(bp->pdev);
6518 if (rc) {
6519 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6520 return -1;
6521 }
6522 bp->flags |= USING_MSI_FLAG;
6523
6524 return 0;
6525}
6526
a2fbb9ea
ET
6527static int bnx2x_req_irq(struct bnx2x *bp)
6528{
8badd27a 6529 unsigned long flags;
34f80b04 6530 int rc;
a2fbb9ea 6531
8badd27a
EG
6532 if (bp->flags & USING_MSI_FLAG)
6533 flags = 0;
6534 else
6535 flags = IRQF_SHARED;
6536
6537 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6538 bp->dev->name, bp->dev);
a2fbb9ea
ET
6539 if (!rc)
6540 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6541
6542 return rc;
a2fbb9ea
ET
6543}
6544
65abd74d
YG
6545static void bnx2x_napi_enable(struct bnx2x *bp)
6546{
6547 int i;
6548
555f6c78 6549 for_each_rx_queue(bp, i)
65abd74d
YG
6550 napi_enable(&bnx2x_fp(bp, i, napi));
6551}
6552
6553static void bnx2x_napi_disable(struct bnx2x *bp)
6554{
6555 int i;
6556
555f6c78 6557 for_each_rx_queue(bp, i)
65abd74d
YG
6558 napi_disable(&bnx2x_fp(bp, i, napi));
6559}
6560
6561static void bnx2x_netif_start(struct bnx2x *bp)
6562{
6563 if (atomic_dec_and_test(&bp->intr_sem)) {
6564 if (netif_running(bp->dev)) {
65abd74d
YG
6565 bnx2x_napi_enable(bp);
6566 bnx2x_int_enable(bp);
555f6c78
EG
6567 if (bp->state == BNX2X_STATE_OPEN)
6568 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6569 }
6570 }
6571}
6572
f8ef6e44 6573static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6574{
f8ef6e44 6575 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6576 bnx2x_napi_disable(bp);
762d5f6c
EG
6577 netif_tx_disable(bp->dev);
6578 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6579}
6580
a2fbb9ea
ET
6581/*
6582 * Init service functions
6583 */
6584
3101c2bc 6585static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6586{
6587 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6588 int port = BP_PORT(bp);
a2fbb9ea
ET
6589
6590 /* CAM allocation
6591 * unicasts 0-31:port0 32-63:port1
6592 * multicast 64-127:port0 128-191:port1
6593 */
8d9c5f34 6594 config->hdr.length = 2;
af246401 6595 config->hdr.offset = port ? 32 : 0;
0626b899 6596 config->hdr.client_id = bp->fp->cl_id;
a2fbb9ea
ET
6597 config->hdr.reserved1 = 0;
6598
6599 /* primary MAC */
6600 config->config_table[0].cam_entry.msb_mac_addr =
6601 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6602 config->config_table[0].cam_entry.middle_mac_addr =
6603 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6604 config->config_table[0].cam_entry.lsb_mac_addr =
6605 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6606 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6607 if (set)
6608 config->config_table[0].target_table_entry.flags = 0;
6609 else
6610 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6611 config->config_table[0].target_table_entry.client_id = 0;
6612 config->config_table[0].target_table_entry.vlan_id = 0;
6613
3101c2bc
YG
6614 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6615 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6616 config->config_table[0].cam_entry.msb_mac_addr,
6617 config->config_table[0].cam_entry.middle_mac_addr,
6618 config->config_table[0].cam_entry.lsb_mac_addr);
6619
6620 /* broadcast */
4781bfad
EG
6621 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6622 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6623 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6624 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6625 if (set)
6626 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6627 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6628 else
6629 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6630 config->config_table[1].target_table_entry.client_id = 0;
6631 config->config_table[1].target_table_entry.vlan_id = 0;
6632
6633 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6634 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6635 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6636}
6637
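/* [Illustrative sketch, not driver code] The CAM entry above stores the
 * MAC address as three byte-swapped 16-bit words. How that packing
 * behaves on a little-endian host (the values shown assume one):
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16_demo(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	/* load each byte pair as a little-endian u16, then swap it */
	uint16_t msb = swab16_demo((uint16_t)(mac[0] | (mac[1] << 8)));
	uint16_t mid = swab16_demo((uint16_t)(mac[2] | (mac[3] << 8)));
	uint16_t lsb = swab16_demo((uint16_t)(mac[4] | (mac[5] << 8)));

	printf("%04x:%04x:%04x\n", msb, mid, lsb);	/* 0010:18aa:bbcc */
	return 0;
}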
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if the index is different from 0,
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

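/* Note: ramrod completions are recorded by bnx2x_sp_event(), so
 * bnx2x_wait_ramrod() above simply sleeps and re-reads the state variable,
 * optionally servicing the rx rings itself when running in polling mode.
 */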
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

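/* Note: the queue count chosen above follows the interrupt mode - a single
 * queue for INT#x/MSI, and up to one queue per online CPU (bounded by
 * BNX2X_MAX_QUEUES) for MSI-X with RSS; if MSI-X allocation fails the
 * driver falls back to one queue and logs an error when multi-queue was
 * requested.
 */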
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP.
	   The reply indicates the type of LOAD command: if this is the
	   first port to be initialized, the common blocks should be
	   initialized as well; otherwise they should not.
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

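	/* Note: at this point load_code says whether this function is the
	 * first to load on the chip (COMMON), the first on its port (PORT)
	 * or neither (FUNCTION); the first driver on a port also becomes
	 * the port management function (pmf), and bnx2x_init_hw() below
	 * uses the same code to decide how much of the HW to initialize.
	 */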
	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

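/* Note: queue teardown mirrors bnx2x_setup_multi() - each non-default
 * connection is first HALTed and then has its CFC entry deleted, waiting
 * synchronously for each ramrod to complete.
 */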
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for the completion to arrive on the default status block;
	   we are going to reset the chip anyway, so there is not much
	   to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

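/* Note: the three reset helpers below are scoped - function, port and
 * common blocks - and bnx2x_reset_chip() composes them according to the
 * unload response code returned by the MCP.
 */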
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

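	/* Note: the reset_code chosen below tells the MCP which wake-on-LAN
	 * behaviour to prepare for; in the WoL case the MAC address is
	 * first written into EMAC match entries 1-4, presumably so that
	 * wake-up frames can still be matched once the driver is gone.
	 */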
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections;
	   completions for the ramrods are collected synchronously */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

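/* Note: bnx2x_undi_unload() detects a leftover UNDI (PXE pre-boot) driver
 * by the CID offset it leaves in the doorbell queue (DORQ_REG_NORM_CID_OFST
 * == 0x7) and, if found, runs the MCP unload handshake for both ports
 * before resetting the device.
 */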
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now we only warn;
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

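/* Note: the next two helpers translate the NVRAM port configuration into
 * ethtool-style masks: bnx2x_link_settings_supported() builds
 * bp->port.supported from the external PHY type and then filters it by
 * speed_cap_mask, while bnx2x_link_settings_requested() derives the
 * requested speed, duplex and flow control.
 */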
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

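/* Note: everything read below comes from the per-port shared memory
 * (shmem) region exposed by the MCP: lane and external PHY configuration,
 * speed capabilities, the link configuration and the factory MAC address.
 */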
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

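/* Note: bnx2x_init_bp() runs once at probe time; it reconciles the module
 * parameters with each other (multi-queue is only kept when the requested
 * interrupt mode allows MSI-X) and seeds the defaults - ring sizes,
 * coalescing ticks and the periodic timer.
 */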
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

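/* Note: bnx2x_set_settings() accepts either autoneg - intersecting the
 * user's advertising mask with bp->port.supported - or a forced
 * speed/duplex, rejecting combinations the port cannot do; the new
 * settings take effect through bnx2x_link_set() only while the interface
 * is running.
 */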
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

8349 case SPEED_100:
8350 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8351 if (!(bp->port.supported &
f1410647
ET
8352 SUPPORTED_100baseT_Full)) {
8353 DP(NETIF_MSG_LINK,
8354 "100M full not supported\n");
a2fbb9ea 8355 return -EINVAL;
f1410647 8356 }
a2fbb9ea
ET
8357
8358 advertising = (ADVERTISED_100baseT_Full |
8359 ADVERTISED_TP);
8360 } else {
34f80b04 8361 if (!(bp->port.supported &
f1410647
ET
8362 SUPPORTED_100baseT_Half)) {
8363 DP(NETIF_MSG_LINK,
8364 "100M half not supported\n");
a2fbb9ea 8365 return -EINVAL;
f1410647 8366 }
a2fbb9ea
ET
8367
8368 advertising = (ADVERTISED_100baseT_Half |
8369 ADVERTISED_TP);
8370 }
8371 break;
8372
8373 case SPEED_1000:
f1410647
ET
8374 if (cmd->duplex != DUPLEX_FULL) {
8375 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8376 return -EINVAL;
f1410647 8377 }
a2fbb9ea 8378
34f80b04 8379 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8380 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8381 return -EINVAL;
f1410647 8382 }
a2fbb9ea
ET
8383
8384 advertising = (ADVERTISED_1000baseT_Full |
8385 ADVERTISED_TP);
8386 break;
8387
8388 case SPEED_2500:
f1410647
ET
8389 if (cmd->duplex != DUPLEX_FULL) {
8390 DP(NETIF_MSG_LINK,
8391 "2.5G half not supported\n");
a2fbb9ea 8392 return -EINVAL;
f1410647 8393 }
a2fbb9ea 8394
34f80b04 8395 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8396 DP(NETIF_MSG_LINK,
8397 "2.5G full not supported\n");
a2fbb9ea 8398 return -EINVAL;
f1410647 8399 }
a2fbb9ea 8400
f1410647 8401 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8402 ADVERTISED_TP);
8403 break;
8404
8405 case SPEED_10000:
f1410647
ET
8406 if (cmd->duplex != DUPLEX_FULL) {
8407 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8408 return -EINVAL;
f1410647 8409 }
a2fbb9ea 8410
34f80b04 8411 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8412 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8413 return -EINVAL;
f1410647 8414 }
a2fbb9ea
ET
8415
8416 advertising = (ADVERTISED_10000baseT_Full |
8417 ADVERTISED_FIBRE);
8418 break;
8419
8420 default:
f1410647 8421 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8422 return -EINVAL;
8423 }
8424
c18487ee
YR
8425 bp->link_params.req_line_speed = cmd->speed;
8426 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8427 bp->port.advertising = advertising;
a2fbb9ea
ET
8428 }
8429
c18487ee 8430 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8431 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8432 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8433 bp->port.advertising);
a2fbb9ea 8434
34f80b04 8435 if (netif_running(dev)) {
bb2a0f7a 8436 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8437 bnx2x_link_set(bp);
8438 }
a2fbb9ea
ET
8439
8440 return 0;
8441}
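
/* Usage sketch (assumed, for illustration only): forcing 1G full duplex
 * without autoneg, e.g. "ethtool -s eth0 speed 1000 duplex full autoneg off",
 * takes the SPEED_1000 case above, while "ethtool -s eth0 autoneg on" takes
 * the AUTONEG_ENABLE branch; in both cases the link is only retrained via
 * bnx2x_link_set() when the interface is up.
 */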

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	static u32 regdump_len;
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (regdump_len)
		return regdump_len;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
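
/* Note (an observation, not from the original comments): regdump_len is
 * cached in a function-level static, so the chip type of the first device
 * queried determines the value for all later calls; in a presumably rare
 * mixed E1/E1H system the cached length would be reused. The dump produced
 * below is a struct dump_hdr followed by the raw 32-bit values of every
 * register block marked online for the chip.
 */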

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

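/* NVRAM is shared between the two ports and with the MCP firmware, so access
 * is serialized through a per-port software arbitration register: the lock
 * request sets MCPR_NVM_SW_ARB_ARB_REQ_SET1 shifted by the port number and
 * then polls for the matching ARB1 grant bit, scaling the timeout on slow
 * (emulation/FPGA) platforms. This summary is inferred from the code below.
 */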
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
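
/* Byte-order sketch (illustrative, assuming the NVM returns the first flash
 * byte in the most significant position, as the conversion above implies):
 * if the flash holds bytes 42 4e 58 32 ('B','N','X','2'), the DONE path
 * reads them as the host-order value 0x424e5832, and cpu_to_be32() lays
 * them back out in memory as 42 4e 58 32 - the order ethtool expects.
 */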

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
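
/* Single-byte writes (ethtool can issue them) are handled below as a
 * read-modify-write of the enclosing aligned dword: BYTE_OFFSET() gives the
 * bit position of the target byte, the old dword is read, one byte is
 * replaced, and the dword is written back in a single FIRST|LAST command.
 */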

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
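
/* The magic 0x00504859 used below is ASCII "PHY" (0x50 'P', 0x48 'H',
 * 0x59 'Y'): "ethtool -E" with that magic routes the buffer to a PHY
 * firmware download instead of a plain NVRAM write, and only the PMF
 * function is allowed to touch the PHY.
 */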

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
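
/* TPA (the HW LRO aggregation) is only usable while Rx checksum offload is
 * on - as the comment in bnx2x_set_rx_csum notes, TPA'ed packets would
 * otherwise be discarded for a wrong TCP checksum - and toggling the flag
 * requires a full unload/load cycle of the NIC, as seen below.
 */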
9156
df0f2343
VZ
9157static int bnx2x_set_flags(struct net_device *dev, u32 data)
9158{
9159 struct bnx2x *bp = netdev_priv(dev);
9160 int changed = 0;
9161 int rc = 0;
9162
9163 /* TPA requires Rx CSUM offloading */
9164 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9165 if (!(dev->features & NETIF_F_LRO)) {
9166 dev->features |= NETIF_F_LRO;
9167 bp->flags |= TPA_ENABLE_FLAG;
9168 changed = 1;
9169 }
9170
9171 } else if (dev->features & NETIF_F_LRO) {
9172 dev->features &= ~NETIF_F_LRO;
9173 bp->flags &= ~TPA_ENABLE_FLAG;
9174 changed = 1;
9175 }
9176
9177 if (changed && netif_running(dev)) {
9178 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9179 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9180 }
9181
9182 return rc;
9183}
9184
a2fbb9ea
ET
9185static u32 bnx2x_get_rx_csum(struct net_device *dev)
9186{
9187 struct bnx2x *bp = netdev_priv(dev);
9188
9189 return bp->rx_csum;
9190}
9191
9192static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9193{
9194 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9195 int rc = 0;
a2fbb9ea
ET
9196
9197 bp->rx_csum = data;
df0f2343
VZ
9198
9199 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9200 TPA'ed packets will be discarded due to wrong TCP CSUM */
9201 if (!data) {
9202 u32 flags = ethtool_op_get_flags(dev);
9203
9204 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9205 }
9206
9207 return rc;
a2fbb9ea
ET
9208}
9209
9210static int bnx2x_set_tso(struct net_device *dev, u32 data)
9211{
755735eb 9212 if (data) {
a2fbb9ea 9213 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9214 dev->features |= NETIF_F_TSO6;
9215 } else {
a2fbb9ea 9216 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9217 dev->features &= ~NETIF_F_TSO6;
9218 }
9219
a2fbb9ea
ET
9220 return 0;
9221}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Run the test twice:
	   first writing 0x00000000, then writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
9392
f3c87cdd
YG
9393static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9394{
9395 int cnt = 1000;
9396
9397 if (link_up)
9398 while (bnx2x_link_test(bp) && cnt--)
9399 msleep(10);
9400}
9401
9402static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9403{
9404 unsigned int pkt_size, num_pkts, i;
9405 struct sk_buff *skb;
9406 unsigned char *packet;
9407 struct bnx2x_fastpath *fp = &bp->fp[0];
9408 u16 tx_start_idx, tx_idx;
9409 u16 rx_start_idx, rx_idx;
9410 u16 pkt_prod;
9411 struct sw_tx_bd *tx_buf;
9412 struct eth_tx_bd *tx_bd;
9413 dma_addr_t mapping;
9414 union eth_rx_cqe *cqe;
9415 u8 cqe_fp_flags;
9416 struct sw_rx_bd *rx_buf;
9417 u16 len;
9418 int rc = -ENODEV;
9419
b5bf9068
EG
9420 /* check the loopback mode */
9421 switch (loopback_mode) {
9422 case BNX2X_PHY_LOOPBACK:
9423 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9424 return -EINVAL;
9425 break;
9426 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9427 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9428 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9429 break;
9430 default:
f3c87cdd 9431 return -EINVAL;
b5bf9068 9432 }
f3c87cdd 9433
b5bf9068
EG
9434 /* prepare the loopback packet */
9435 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9436 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9437 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9438 if (!skb) {
9439 rc = -ENOMEM;
9440 goto test_loopback_exit;
9441 }
9442 packet = skb_put(skb, pkt_size);
9443 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9444 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9445 for (i = ETH_HLEN; i < pkt_size; i++)
9446 packet[i] = (unsigned char) (i & 0xff);
9447
b5bf9068 9448 /* send the loopback packet */
f3c87cdd
YG
9449 num_pkts = 0;
9450 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9451 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9452
9453 pkt_prod = fp->tx_pkt_prod++;
9454 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9455 tx_buf->first_bd = fp->tx_bd_prod;
9456 tx_buf->skb = skb;
9457
9458 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9459 mapping = pci_map_single(bp->pdev, skb->data,
9460 skb_headlen(skb), PCI_DMA_TODEVICE);
9461 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9462 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9463 tx_bd->nbd = cpu_to_le16(1);
9464 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9465 tx_bd->vlan = cpu_to_le16(pkt_prod);
9466 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9467 ETH_TX_BD_FLAGS_END_BD);
9468 tx_bd->general_data = ((UNICAST_ADDRESS <<
9469 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9470
58f4c4cf
EG
9471 wmb();
9472
4781bfad 9473 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9474 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9475 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9476 DOORBELL(bp, fp->index, 0);
f3c87cdd
YG
9477
9478 mmiowb();
9479
9480 num_pkts++;
9481 fp->tx_bd_prod++;
9482 bp->dev->trans_start = jiffies;
9483
9484 udelay(100);
9485
9486 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9487 if (tx_idx != tx_start_idx + num_pkts)
9488 goto test_loopback_exit;
9489
9490 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9491 if (rx_idx != rx_start_idx + num_pkts)
9492 goto test_loopback_exit;
9493
9494 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9495 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9496 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9497 goto test_loopback_rx_exit;
9498
9499 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9500 if (len != pkt_size)
9501 goto test_loopback_rx_exit;
9502
9503 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9504 skb = rx_buf->skb;
9505 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9506 for (i = ETH_HLEN; i < pkt_size; i++)
9507 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9508 goto test_loopback_rx_exit;
9509
9510 rc = 0;
9511
9512test_loopback_rx_exit:
f3c87cdd
YG
9513
9514 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9515 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9516 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9517 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9518
9519 /* Update producers */
9520 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9521 fp->rx_sge_prod);
f3c87cdd
YG
9522
9523test_loopback_exit:
9524 bp->link_params.loopback_mode = LOOPBACK_NONE;
9525
9526 return rc;
9527}
9528
9529static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9530{
b5bf9068 9531 int rc = 0, res;
f3c87cdd
YG
9532
9533 if (!netif_running(bp->dev))
9534 return BNX2X_LOOPBACK_FAILED;
9535
f8ef6e44 9536 bnx2x_netif_stop(bp, 1);
3910c8ae 9537 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9538
b5bf9068
EG
9539 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9540 if (res) {
9541 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9542 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
9543 }
9544
b5bf9068
EG
9545 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9546 if (res) {
9547 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9548 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
9549 }
9550
3910c8ae 9551 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9552 bnx2x_netif_start(bp);
9553
9554 return rc;
9555}
9556
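/* 0xdebb20e3 is the well-known CRC-32 residual: computing the CRC over a
 * block that already carries its CRC appended yields this fixed constant,
 * so each NVRAM section below can be validated without locating the stored
 * checksum field itself.
 */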
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
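
/* Result slots written by bnx2x_self_test() below, matching
 * bnx2x_tests_str_arr above: buf[0] register test, buf[1] memory test,
 * buf[2] loopback test, buf[3] nvram test, buf[4] interrupt test,
 * buf[5] link test; buf[6] ("idle check") is left at zero here.
 */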

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
	(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

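/* Stats selection summary (derived from the code below): with multiple RSS
 * queues every queue exports BNX2X_NUM_Q_STATS "[%d]: ..." counters and the
 * global counters are appended unless the E1HMF statistics mode hides them;
 * in single-queue mode only bnx2x_stats_arr is used, with port-only stats
 * filtered out in E1HMF mode. get_strings, get_stats_count and
 * get_ethtool_stats must all agree on this layout.
 */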
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
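
/* The increment above skips the last entry of an RCQ page, which (it appears
 * from the MAX_RCQ_DESC_CNT test) is reserved as a next-page pointer rather
 * than a real completion, so the consumer index never stops on it.
 */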

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after BNX2X_HAS_WORK and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM).
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
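
/* Note (illustrative, not in the original source): the data BD simply
 * points 'hlen' bytes into the header BD's DMA mapping, so at
 * completion time only the start BD needs to be unmapped.
 */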

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
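
/* Note (illustrative, not in the original source): 'fix' is the offset
 * between where the stack started its partial checksum and where the FW
 * expects it to start; a positive fix checksums the extra leading bytes
 * and subtracts them, while a negative fix folds the missing bytes back
 * in.  The result is byte-swapped because the parsing BD expects the
 * pseudo checksum in the opposite byte order.
 */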

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
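
/* Example (illustrative, not in the original source): a TSO IPv4 TCP
 * skb yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a packet
 * without checksum offload requested is simply XMIT_PLAIN.
 */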

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (i.e. it is too
   fragmented).  No need to check fragmentation if page size > 8K
   (there will be no violation of the FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented should
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
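
/* Illustrative example (not in the original source): with wnd_size = 10
 * and 13 frags, four windows are checked - frags 0-9 (plus any linear
 * data), 1-10, 2-11 and 3-12.  If any window of wnd_size consecutive
 * BDs holds fewer bytes than the LSO MSS, the FW restriction would be
 * violated, so the skb is linearized instead.
 */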

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions).  No need to check fragmentation if page size > 8K
	   (there will be no violation of the FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully.  First we use one BD which we mark as start,
	then for TSO or checksum offload (xsum) we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/
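
	/* Illustrative BD chain for a TSO packet with two frags (not part
	 * of the original source):
	 *
	 *   [start BD: headers] -> [parsing BD] -> [split data BD]
	 *       -> [frag 0 BD] -> [frag 1 BD, marked last]
	 */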

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;
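		/* Note (illustrative, not in the original source): the
		 * parsing BD counts header lengths in 16-bit words, hence
		 * the divisions by 2 above; here hlen is converted back to
		 * bytes for the TSO header split further down.
		 */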

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
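
	/* Note (illustrative, not in the original source): the pseudo
	 * header checksum above is seeded with a length of 0 and the
	 * PSEUDO_CS_WITHOUT_LEN flag is set, since the FW fills in the
	 * per-segment length when it recomputes each segment's checksum.
	 */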

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
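
	/* Note (illustrative, not in the original source): the block above
	 * is the usual stop-then-recheck idiom - the queue is stopped
	 * first and tx_avail re-read afterwards, so a concurrent
	 * bnx2x_tx_int() that freed descriptors in between cannot leave
	 * the queue stopped forever.
	 */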
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
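
/* Illustrative walk-through of the E1H hash above (not in the original
 * source): crc32c_le() over the 6-byte MAC gives a 32-bit CRC; bits
 * 31:24 select one of 256 filter bits, regidx = bit >> 5 picks one of
 * the MC_HASH registers and bit &= 0x1f the bit within it.  For
 * example, crc = 0xA7xxxxxx -> bit 167 -> register 5, bit 7.
 */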

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}
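
	/* Note (illustrative, not in the original source): when the 64-bit
	 * mask is accepted, USING_DAC_FLAG makes the driver advertise
	 * NETIF_F_HIGHDMA below; otherwise it falls back to the 32-bit
	 * mask and aborts the probe if even that is unsupported.
	 */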

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);