/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.48.102"
#define DRV_MODULE_RELDATE	"2009/02/12"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

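/* Note on the two helpers above: they reach chip (GRC) registers through
 * PCI config cycles -- the target offset is written to PCICFG_GRC_ADDRESS,
 * which turns the following access to PCICFG_GRC_DATA into a read/write of
 * that register; the window is then parked back on PCICFG_VENDOR_ID_OFFSET
 * (presumably so that a stray data-port access hits a harmless location).
 * This path works before the BARs are usable, hence its use at init time
 * and as the DMAE fallback.
 */
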
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

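/* DMAE usage pattern (see the two wrappers below): build a struct
 * dmae_command describing source, destination, length and a completion
 * word, copy it into the engine's command memory via bnx2x_post_dmae(),
 * and kick the matching GO register.  Completion is detected by polling
 * for the DMAE_COMP_VAL magic that the engine writes back to comp_addr.
 */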
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

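/* The two helpers below program the HC (host coalescing) CONFIG register
 * of this port: which of INTx, MSI or MSI-X is driven, whether single-ISR
 * mode is used and whether attention bits are forwarded.  Note the INTx
 * quirk in bnx2x_int_enable(): the value is first written with MSI/MSI-X
 * still set and only then rewritten with the final INTx-only config.
 */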
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

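/* Every status block update must be acknowledged to the IGU.  The ack is
 * a single 32-bit write of struct igu_ack_register to the port's HC
 * command register: it carries the status block id, the storm, the last
 * index the driver has seen and the requested interrupt mode.
 */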
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

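/* A transmitted packet occupies a chain of BDs: the first (mapped) BD, an
 * optional parse BD for checksum/LSO offload, an optional TSO split-header
 * BD and one BD per fragment.  The nbd field of the first BD counts them
 * all, which is what the unmap walk below relies on.
 */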
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

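/* TX ring accounting: the ring contains NUM_TX_RINGS "next-page" BDs that
 * can never carry data, so they are folded into the "used" count below and
 * availability is tx_ring_size minus that.  SUB_S16() keeps the prod/cons
 * distance correct across 16-bit wrap-around.
 */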
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

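/* Each SGE ring entry maps a compound page of PAGES_PER_SGE pages.  The
 * allocator below publishes the DMA address in the hi/lo little-endian
 * split the chip expects; on mapping failure the page is freed and
 * -ENOMEM returned so the caller can stop refilling.
 */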
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

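/* TPA (transparent packet aggregation, the HW LRO flavor) keeps one spare
 * skb per aggregation queue in fp->tpa_pool.  tpa_start() parks the
 * currently mapped rx buffer in the pool and puts the spare skb on the
 * ring in its place; tpa_stop() later completes the aggregated packet
 * from the SGE pages and leaves a fresh skb in the pool.
 */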
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

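/* bnx2x_rx_int() is the NAPI receive handler: it walks the completion
 * queue up to the status block consumer, demultiplexing each CQE into
 * slowpath events, TPA start/stop and regular packets.  Small frames are
 * copied when mtu > 1500 since there is no jumbo ring, and the BD, CQE
 * and SGE producers are republished to the chip at the end via
 * bnx2x_update_rx_prod().
 */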
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

c18487ee 1749/* end of fast path */
a2fbb9ea 1750
bb2a0f7a 1751static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1752
c18487ee
YR
1753/* Link */
1754
1755/*
1756 * General service functions
1757 */
a2fbb9ea 1758
4a37fb66 1759static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1760{
1761 u32 lock_status;
1762 u32 resource_bit = (1 << resource);
4a37fb66
YG
1763 int func = BP_FUNC(bp);
1764 u32 hw_lock_control_reg;
c18487ee 1765 int cnt;
a2fbb9ea 1766
c18487ee
YR
1767 /* Validating that the resource is within range */
1768 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1769 DP(NETIF_MSG_HW,
1770 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1771 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1772 return -EINVAL;
1773 }
a2fbb9ea 1774
4a37fb66
YG
1775 if (func <= 5) {
1776 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1777 } else {
1778 hw_lock_control_reg =
1779 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1780 }
1781
c18487ee 1782 /* Validating that the resource is not already taken */
4a37fb66 1783 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1784 if (lock_status & resource_bit) {
1785 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1786 lock_status, resource_bit);
1787 return -EEXIST;
1788 }
a2fbb9ea 1789
46230476
EG
1790 /* Try for 5 second every 5ms */
1791 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1792 /* Try to acquire the lock */
4a37fb66
YG
1793 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1794 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1795 if (lock_status & resource_bit)
1796 return 0;
a2fbb9ea 1797
c18487ee 1798 msleep(5);
a2fbb9ea 1799 }
c18487ee
YR
1800 DP(NETIF_MSG_HW, "Timeout\n");
1801 return -EAGAIN;
1802}
a2fbb9ea 1803
4a37fb66 1804static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1805{
1806 u32 lock_status;
1807 u32 resource_bit = (1 << resource);
4a37fb66
YG
1808 int func = BP_FUNC(bp);
1809 u32 hw_lock_control_reg;
a2fbb9ea 1810
c18487ee
YR
1811 /* Validating that the resource is within range */
1812 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1813 DP(NETIF_MSG_HW,
1814 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1815 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1816 return -EINVAL;
1817 }
1818
4a37fb66
YG
1819 if (func <= 5) {
1820 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1821 } else {
1822 hw_lock_control_reg =
1823 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1824 }
1825
c18487ee 1826 /* Validating that the resource is currently taken */
4a37fb66 1827 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1828 if (!(lock_status & resource_bit)) {
1829 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1830 lock_status, resource_bit);
1831 return -EFAULT;
a2fbb9ea
ET
1832 }
1833
4a37fb66 1834 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1835 return 0;
1836}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
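
/*
 * Illustrative note (editorial, not in the original source): with the
 * port-swap strap active, both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero, so for port 1 the XOR yields
 * gpio_port = 1 ^ 1 = 0 and the pin is looked up in the port-0 bank.
 * Without the swap, gpio_port simply equals the caller's port, and for
 * port 1 the shift gpio_num + MISC_REGISTERS_GPIO_PORT_SHIFT selects
 * the port-1 half of the GPIO register.
 */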

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
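
/*
 * Illustrative mapping (editorial note, not in the original source):
 * the switch above follows the IEEE 802.3 pause-resolution encoding --
 * PAUSE_BOTH advertises symmetric plus asymmetric pause
 * (ADVERTISED_Pause | ADVERTISED_Asym_Pause), PAUSE_ASYMMETRIC
 * advertises asymmetric only, and PAUSE_NONE (like the default case)
 * clears both bits from bp->port.advertising.
 */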

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
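
/*
 * Worked example (editorial sketch, derived from the comments above):
 * at 10G, line_speed = 10000 Mbps, so r_param = 10000 / 8 = 1250
 * bytes/usec and t_fair = T_FAIR_COEF / 10000 = 1000 usec (matching
 * the "for 10G it is 1000usec" note, which implies
 * T_FAIR_COEF = 1e7). The rate-shaping threshold then becomes
 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4, i.e. the byte credit of
 * one period padded by the 1.25 timer-inaccuracy coefficient.
 */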

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes)
		   and the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
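
/*
 * Worked example (editorial sketch): a min-bandwidth field of 25 in
 * func_mf_config yields vn_min_rate = 25 * 100 = 2500 Mbps, and a
 * vn_max_rate of 10000 Mbps gives a per-period quota of
 * 10000 * RS_PERIODIC_TIMEOUT_USEC / 8 = 125000 bytes when
 * RS_PERIODIC_TIMEOUT_USEC is 100 (the value implied by the
 * "100 usec in SDM ticks" comment above).
 */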


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
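
/*
 * Illustrative sketch (editorial, not in the original source): a
 * caller posting a non-common ramrod does so with something like
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      data_hi, data_lo, 0);
 *
 * (this exact call appears in bnx2x_storm_stats_post() below). The
 * command lands in the upper bits of conn_and_cmd_data via
 * SPE_HDR_CMD_ID_SHIFT while HW_CID(bp, cid) fills the connection id
 * below it; passing common = 1 additionally sets the
 * SPE_HDR_COMMON_RAMROD bit in the header type.
 */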

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
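
/*
 * Editorial note on the return value: each bit of rc marks one part of
 * the default status block whose index moved -- bit 0 attention bits,
 * bit 1 CSTORM, bit 2 USTORM, bit 3 XSTORM, bit 4 TSTORM. A return of
 * 0x3, for instance, means both the attention and CSTORM indices were
 * updated; bnx2x_sp_task() below only keys off bit 0.
 */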

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
					external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
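
/*
 * Worked bit example (editorial sketch): with attn_bits = 0x5,
 * attn_ack = 0x1 and attn_state = 0x1, the masks above give
 * asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (bit 2 newly raised) and
 * deasserted = ~0x5 & 0x1 & 0x1 = 0x0, so only the assertion path
 * runs. A line that has been acked and has since dropped would
 * instead show up in deasserted and be cleaned up via
 * bnx2x_attn_int_deasserted().
 */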

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
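
/*
 * Worked example (editorial sketch): subtracting 1 from the 64-bit
 * value 0x00000001_00000000 takes the underflow branch above --
 * m_lo = 0 < s_lo = 1 and d_hi = 1 - 0 = 1 > 0, so one is "loaned":
 * d_hi becomes 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * giving the expected 0x00000000_ffffffff. A true underflow
 * (minuend < subtrahend) is clamped to zero instead of wrapping.
 */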

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
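
/*
 * Editorial note: the hi/lo pairs above are stored high word first, so
 * bnx2x_hilo() reads *(hiref + 1) for the low 32 bits. On a 64-bit
 * kernel it folds, e.g., hi = 0x1, lo = 0x2 into
 * HILO_U64(0x1, 0x2) = 0x100000002; on a 32-bit kernel only the low
 * word is returned, so large counters can truncate there.
 */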

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
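
/*
 * Editorial note on the completion handshake: the final command in
 * each DMAE chain carries comp_val = DMAE_COMP_VAL with comp_addr
 * pointing at stats_comp, so bnx2x_stats_comp() simply polls that word
 * (up to roughly 10 ms here) until the value reappears. The "loader"
 * command built in bnx2x_hw_stats_post() appears to copy the next
 * dmae_command into DMAE_REG_CMD_MEM and kick it through the
 * dmae_reg_go_c channel registers.
 */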
3214
3215/*
3216 * Statistics service functions
3217 */
3218
3219static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3220{
3221 struct dmae_command *dmae;
3222 u32 opcode;
3223 int loader_idx = PMF_DMAE_C(bp);
3224 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3225
3226 /* sanity */
3227 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3228 BNX2X_ERR("BUG!\n");
3229 return;
3230 }
3231
3232 bp->executer_idx = 0;
3233
3234 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3235 DMAE_CMD_C_ENABLE |
3236 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3237#ifdef __BIG_ENDIAN
3238 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3239#else
3240 DMAE_CMD_ENDIANITY_DW_SWAP |
3241#endif
3242 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3243 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3244
3245 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3247 dmae->src_addr_lo = bp->port.port_stx >> 2;
3248 dmae->src_addr_hi = 0;
3249 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3250 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3251 dmae->len = DMAE_LEN32_RD_MAX;
3252 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3253 dmae->comp_addr_hi = 0;
3254 dmae->comp_val = 1;
3255
3256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3258 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3259 dmae->src_addr_hi = 0;
7a9b2557
VZ
3260 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3261 DMAE_LEN32_RD_MAX * 4);
3262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3263 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3264 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3265 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3266 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3267 dmae->comp_val = DMAE_COMP_VAL;
3268
3269 *stats_comp = 0;
3270 bnx2x_hw_stats_post(bp);
3271 bnx2x_stats_comp(bp);
3272}
3273
3274static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3275{
3276 struct dmae_command *dmae;
34f80b04 3277 int port = BP_PORT(bp);
bb2a0f7a 3278 int vn = BP_E1HVN(bp);
a2fbb9ea 3279 u32 opcode;
bb2a0f7a 3280 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3281 u32 mac_addr;
bb2a0f7a
YG
3282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3283
3284 /* sanity */
3285 if (!bp->link_vars.link_up || !bp->port.pmf) {
3286 BNX2X_ERR("BUG!\n");
3287 return;
3288 }
a2fbb9ea
ET
3289
3290 bp->executer_idx = 0;
bb2a0f7a
YG
3291
3292 /* MCP */
3293 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3294 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3296#ifdef __BIG_ENDIAN
bb2a0f7a 3297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3298#else
bb2a0f7a 3299 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3300#endif
bb2a0f7a
YG
3301 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3302 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3303
bb2a0f7a 3304 if (bp->port.port_stx) {
a2fbb9ea
ET
3305
3306 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3307 dmae->opcode = opcode;
bb2a0f7a
YG
3308 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3309 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3310 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3311 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3312 dmae->len = sizeof(struct host_port_stats) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3315 dmae->comp_val = 1;
a2fbb9ea
ET
3316 }
3317
bb2a0f7a
YG
3318 if (bp->func_stx) {
3319
3320 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321 dmae->opcode = opcode;
3322 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3323 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3324 dmae->dst_addr_lo = bp->func_stx >> 2;
3325 dmae->dst_addr_hi = 0;
3326 dmae->len = sizeof(struct host_func_stats) >> 2;
3327 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328 dmae->comp_addr_hi = 0;
3329 dmae->comp_val = 1;
a2fbb9ea
ET
3330 }
3331
bb2a0f7a 3332 /* MAC */
a2fbb9ea
ET
3333 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3334 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3336#ifdef __BIG_ENDIAN
3337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3338#else
3339 DMAE_CMD_ENDIANITY_DW_SWAP |
3340#endif
bb2a0f7a
YG
3341 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3343
	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;
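
	/* Editorial note: every command above completes to the DMAE "go"
	 * register of the loader channel (comp_val 1), which is what kicks
	 * off the next command in the chain; only this final one completes
	 * to host memory, writing DMAE_COMP_VAL into stats_comp for the
	 * driver to poll -- a reading of the comp_addr/comp_val settings,
	 * not an extra semantic.
	 */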

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

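	/* Editorial note: UPDATE_STAT64 (a macro from bnx2x.h) is understood
	 * to fold the delta between the freshly DMAEd BMAC counter in 'new'
	 * and the previous snapshot into the 64-bit hi/lo pair kept in
	 * pstats->mac_stx[1]; see the macro for the exact arithmetic.
	 */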
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
			&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
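		/* Each storm stamps its per-client block with the sequence
		 * number of the statistics ramrod it has served; that stamp
		 * plus one must equal bp->stats_counter or the block is
		 * stale, in which case the previous snapshot is kept
		 * (an editorial reading of the three checks below).
		 */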
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

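	/* bnx2x_hilo() (defined earlier in this file) collapses a {hi, lo}
	 * u32 pair into one scalar; the high word appears to survive only
	 * on 64-bit hosts, so these counters wrap sooner on 32-bit.
	 */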
	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

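/* A small two-state machine: the rows are STATS_STATE_DISABLED/ENABLED and
 * the columns are the PMF/LINK_UP/UPDATE/STOP events; each cell pairs the
 * action to run with the state to move to. bnx2x_stats_handle() below is
 * the single dispatch point for all statistics events.
 */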
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
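		/* For illustration (values made up): drv_pulse 0x000a is
		 * acceptable against mcp_pulse 0x000a (MCP already replied)
		 * or 0x0009 (reply still pending); anything else means a
		 * heartbeat was lost.
		 */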
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

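	/* Start with every USTORM index masked (HC_DISABLE = 1); the CSTORM
	 * loop below does the same, and bnx2x_update_coalesce() later
	 * re-enables the indices that are actually used -- an editorial
	 * reading of the init flow, not a documented contract.
	 */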
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

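	/* The HC timeout registers appear to count in 12us units, hence the
	 * division by 12 below; a tick value of 0 instead masks the index
	 * entirely via the matching HC_DISABLE register.
	 */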
	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
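		/* The tail of every ring page is reserved for the pointer
		 * to the next page (two BD/SGE slots, one CQE slot), which
		 * is why the loops below write at "... * i - 2" (or "- 1"
		 * for the CQ) and the NEXT_*_IDX() macros skip over those
		 * slots -- describing the layout the indexing implies.
		 */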
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

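	/* publish the slowpath queue to the hardware: the XSTORM gets the
	 * page base address and the initial producer index via the three
	 * register writes below.
	 */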
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
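		/* worst-case number of SGE entries one aggregated frame of
		 * this MTU can consume: pages needed for the MTU, rounded up
		 * to a whole SGE's worth of pages, expressed in SGEs
		 * (a gloss on the two assignments below).
		 */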
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates
   or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
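/* Illustration (assuming the MF config stores min BW as a percentage and
   DEF_MIN_RATE is the non-zero floor): configured rates 0/25/0/75 give
   vn_weight_sum = 2500 + 7500 + 2*DEF_MIN_RATE, while 0/0/0/0 collapses
   vn_weight_sum back to 0 and fairness stays off. */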
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
4911
471de716 4912static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4913{
a2fbb9ea
ET
4914 struct tstorm_eth_function_common_config tstorm_config = {0};
4915 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4916 int port = BP_PORT(bp);
4917 int func = BP_FUNC(bp);
de832a55
EG
4918 int i, j;
4919 u32 offset;
471de716 4920 u16 max_agg_size;
a2fbb9ea
ET
4921
4922 if (is_multi(bp)) {
555f6c78 4923 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4924 tstorm_config.rss_result_mask = MULTI_MASK;
4925 }
8d9c5f34
EG
4926 if (IS_E1HMF(bp))
4927 tstorm_config.config_flags |=
4928 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4929
34f80b04
EG
4930 tstorm_config.leading_client_id = BP_L_ID(bp);
4931
a2fbb9ea 4932 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4933 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4934 (*(u32 *)&tstorm_config));
4935
c14423fe 4936 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4937 bnx2x_set_storm_rx_mode(bp);
4938
de832a55
EG
4939 for_each_queue(bp, i) {
4940 u8 cl_id = bp->fp[i].cl_id;
4941
4942 /* reset xstorm per client statistics */
4943 offset = BAR_XSTRORM_INTMEM +
4944 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4945 for (j = 0;
4946 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4947 REG_WR(bp, offset + j*4, 0);
4948
4949 /* reset tstorm per client statistics */
4950 offset = BAR_TSTRORM_INTMEM +
4951 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4952 for (j = 0;
4953 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4954 REG_WR(bp, offset + j*4, 0);
4955
4956 /* reset ustorm per client statistics */
4957 offset = BAR_USTRORM_INTMEM +
4958 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4959 for (j = 0;
4960 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4961 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4962 }
4963
4964 /* Init statistics related context */
34f80b04 4965 stats_flags.collect_eth = 1;
a2fbb9ea 4966
66e855f3 4967 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4968 ((u32 *)&stats_flags)[0]);
66e855f3 4969 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4970 ((u32 *)&stats_flags)[1]);
4971
66e855f3 4972 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4973 ((u32 *)&stats_flags)[0]);
66e855f3 4974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4975 ((u32 *)&stats_flags)[1]);
4976
de832a55
EG
4977 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4978 ((u32 *)&stats_flags)[0]);
4979 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4980 ((u32 *)&stats_flags)[1]);
4981
66e855f3 4982 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4983 ((u32 *)&stats_flags)[0]);
66e855f3 4984 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4985 ((u32 *)&stats_flags)[1]);
4986
66e855f3
YG
4987 REG_WR(bp, BAR_XSTRORM_INTMEM +
4988 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4989 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4990 REG_WR(bp, BAR_XSTRORM_INTMEM +
4991 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4992 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4993
4994 REG_WR(bp, BAR_TSTRORM_INTMEM +
4995 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4996 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4997 REG_WR(bp, BAR_TSTRORM_INTMEM +
4998 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4999 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5000
de832a55
EG
5001 REG_WR(bp, BAR_USTRORM_INTMEM +
5002 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5003 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5004 REG_WR(bp, BAR_USTRORM_INTMEM +
5005 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5006 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5007
34f80b04
EG
5008 if (CHIP_IS_E1H(bp)) {
5009 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5010 IS_E1HMF(bp));
5011 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5012 IS_E1HMF(bp));
5013 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5014 IS_E1HMF(bp));
5015 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5016 IS_E1HMF(bp));
5017
7a9b2557
VZ
5018 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5019 bp->e1hov);
34f80b04
EG
5020 }
5021
4f40f2cb
EG
5022 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5023 max_agg_size =
5024 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5025 SGE_PAGE_SIZE * PAGES_PER_SGE),
5026 (u32)0xffff);
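/* For illustration, assuming 4KB pages and PAGES_PER_SGE == 1:
 * min(8, MAX_SKB_FRAGS) == 8, so max_agg_size ==
 * min(8 * 4096 * 1, 0xffff) == 32768 - the FW 8-frag limit, not the
 * u16 clamp, is what bounds the aggregation size in this common case.
 */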
555f6c78 5027 for_each_rx_queue(bp, i) {
7a9b2557 5028 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5029
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5031 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5032 U64_LO(fp->rx_comp_mapping));
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5034 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5035 U64_HI(fp->rx_comp_mapping));
5036
7a9b2557 5037 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5038 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5039 max_agg_size);
5040 }
8a1c38d1 5041
1c06328c
EG
5042 /* dropless flow control */
5043 if (CHIP_IS_E1H(bp)) {
5044 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5045
5046 rx_pause.bd_thr_low = 250;
5047 rx_pause.cqe_thr_low = 250;
5048 rx_pause.cos = 1;
5049 rx_pause.sge_thr_low = 0;
5050 rx_pause.bd_thr_high = 350;
5051 rx_pause.cqe_thr_high = 350;
5052 rx_pause.sge_thr_high = 0;
5053
5054 for_each_rx_queue(bp, i) {
5055 struct bnx2x_fastpath *fp = &bp->fp[i];
5056
5057 if (!fp->disable_tpa) {
5058 rx_pause.sge_thr_low = 150;
5059 rx_pause.sge_thr_high = 250;
5060 }
5061
5062
5063 offset = BAR_USTRORM_INTMEM +
5064 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5065 fp->cl_id);
5066 for (j = 0;
5067 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5068 j++)
5069 REG_WR(bp, offset + j*4,
5070 ((u32 *)&rx_pause)[j]);
5071 }
5072 }
5073
8a1c38d1
EG
5074 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5075
5076 /* Init rate shaping and fairness contexts */
5077 if (IS_E1HMF(bp)) {
5078 int vn;
5079
 5080 /* During init there is no active link.
 5081 Until link is up, set link rate to 10Gbps */
5082 bp->link_vars.line_speed = SPEED_10000;
5083 bnx2x_init_port_minmax(bp);
5084
5085 bnx2x_calc_vn_weight_sum(bp);
5086
5087 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5088 bnx2x_init_vn_minmax(bp, 2*vn + port);
5089
5090 /* Enable rate shaping and fairness */
5091 bp->cmng.flags.cmng_enables =
5092 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5093 if (bp->vn_weight_sum)
5094 bp->cmng.flags.cmng_enables |=
5095 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5096 else
5097 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5098 " fairness will be disabled\n");
5099 } else {
5100 /* rate shaping and fairness are disabled */
5101 DP(NETIF_MSG_IFUP,
5102 "single function mode minmax will be disabled\n");
5103 }
5104
5105
5106 /* Store it to internal memory */
5107 if (bp->port.pmf)
5108 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5109 REG_WR(bp, BAR_XSTRORM_INTMEM +
5110 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5111 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5112}
5113
471de716
EG
5114static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5115{
5116 switch (load_code) {
5117 case FW_MSG_CODE_DRV_LOAD_COMMON:
5118 bnx2x_init_internal_common(bp);
5119 /* no break */
5120
5121 case FW_MSG_CODE_DRV_LOAD_PORT:
5122 bnx2x_init_internal_port(bp);
5123 /* no break */
5124
5125 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5126 bnx2x_init_internal_func(bp);
5127 break;
5128
5129 default:
5130 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5131 break;
5132 }
5133}
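/* Note: the missing breaks above are intentional - the load codes form
 * a superset chain, so a COMMON load falls through to also run the PORT
 * and FUNCTION init, while a FUNCTION load runs only its own stage.
 */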
5134
5135static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5136{
5137 int i;
5138
5139 for_each_queue(bp, i) {
5140 struct bnx2x_fastpath *fp = &bp->fp[i];
5141
34f80b04 5142 fp->bp = bp;
a2fbb9ea 5143 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5144 fp->index = i;
34f80b04
EG
5145 fp->cl_id = BP_L_ID(bp) + i;
5146 fp->sb_id = fp->cl_id;
5147 DP(NETIF_MSG_IFUP,
f5372251
EG
5148 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5149 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5150 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5151 fp->sb_id);
5c862848 5152 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5153 }
5154
16119785
EG
5155 /* ensure status block indices were read */
5156 rmb();
5157
5158
5c862848
EG
5159 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5160 DEF_SB_ID);
5161 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5162 bnx2x_update_coalesce(bp);
5163 bnx2x_init_rx_rings(bp);
5164 bnx2x_init_tx_ring(bp);
5165 bnx2x_init_sp_ring(bp);
5166 bnx2x_init_context(bp);
471de716 5167 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5168 bnx2x_init_ind_table(bp);
0ef00459
EG
5169 bnx2x_stats_init(bp);
5170
5171 /* At this point, we are ready for interrupts */
5172 atomic_set(&bp->intr_sem, 0);
5173
5174 /* flush all before enabling interrupts */
5175 mb();
5176 mmiowb();
5177
615f8fd9 5178 bnx2x_int_enable(bp);
a2fbb9ea
ET
5179}
5180
5181/* end of nic init */
5182
5183/*
5184 * gzip service functions
5185 */
5186
5187static int bnx2x_gunzip_init(struct bnx2x *bp)
5188{
5189 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5190 &bp->gunzip_mapping);
5191 if (bp->gunzip_buf == NULL)
5192 goto gunzip_nomem1;
5193
5194 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5195 if (bp->strm == NULL)
5196 goto gunzip_nomem2;
5197
5198 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5199 GFP_KERNEL);
5200 if (bp->strm->workspace == NULL)
5201 goto gunzip_nomem3;
5202
5203 return 0;
5204
5205gunzip_nomem3:
5206 kfree(bp->strm);
5207 bp->strm = NULL;
5208
5209gunzip_nomem2:
5210 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5211 bp->gunzip_mapping);
5212 bp->gunzip_buf = NULL;
5213
5214gunzip_nomem1:
5215 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5216 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5217 return -ENOMEM;
5218}
5219
5220static void bnx2x_gunzip_end(struct bnx2x *bp)
5221{
5222 kfree(bp->strm->workspace);
5223
5224 kfree(bp->strm);
5225 bp->strm = NULL;
5226
5227 if (bp->gunzip_buf) {
5228 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5229 bp->gunzip_mapping);
5230 bp->gunzip_buf = NULL;
5231 }
5232}
5233
5234static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5235{
5236 int n, rc;
5237
5238 /* check gzip header */
5239 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5240 return -EINVAL;
5241
5242 n = 10;
5243
34f80b04 5244#define FNAME 0x8
a2fbb9ea
ET
5245
5246 if (zbuf[3] & FNAME)
5247 while ((zbuf[n++] != 0) && (n < len));
5248
5249 bp->strm->next_in = zbuf + n;
5250 bp->strm->avail_in = len - n;
5251 bp->strm->next_out = bp->gunzip_buf;
5252 bp->strm->avail_out = FW_BUF_SIZE;
5253
5254 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5255 if (rc != Z_OK)
5256 return rc;
5257
5258 rc = zlib_inflate(bp->strm, Z_FINISH);
5259 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5260 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5261 bp->dev->name, bp->strm->msg);
5262
5263 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5264 if (bp->gunzip_outlen & 0x3)
5265 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5266 " gunzip_outlen (%d) not aligned\n",
5267 bp->dev->name, bp->gunzip_outlen);
5268 bp->gunzip_outlen >>= 2;
5269
5270 zlib_inflateEnd(bp->strm);
5271
5272 if (rc == Z_STREAM_END)
5273 return 0;
5274
5275 return rc;
5276}
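/* A sketch of the gzip header (RFC 1952) that the code above skips:
 * bytes 0-1 are the 0x1f 0x8b magic, byte 2 the method (8 ==
 * Z_DEFLATED), byte 3 the flags - if FNAME (0x8) is set, a
 * NUL-terminated file name follows the fixed 10-byte header, hence the
 * while loop. zlib_inflateInit2() is called with -MAX_WBITS so zlib
 * consumes the remainder as a raw deflate stream without a wrapper.
 */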
5277
5278/* nic load/unload */
5279
5280/*
34f80b04 5281 * General service functions
a2fbb9ea
ET
5282 */
5283
5284/* send a NIG loopback debug packet */
5285static void bnx2x_lb_pckt(struct bnx2x *bp)
5286{
a2fbb9ea 5287 u32 wb_write[3];
a2fbb9ea
ET
5288
5289 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5290 wb_write[0] = 0x55555555;
5291 wb_write[1] = 0x55555555;
34f80b04 5292 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5293 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5294
5295 /* NON-IP protocol */
a2fbb9ea
ET
5296 wb_write[0] = 0x09000000;
5297 wb_write[1] = 0x55555555;
34f80b04 5298 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5299 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5300}
5301
5302/* some of the internal memories
5303 * are not directly readable from the driver
5304 * to test them we send debug packets
5305 */
5306static int bnx2x_int_mem_test(struct bnx2x *bp)
5307{
5308 int factor;
5309 int count, i;
5310 u32 val = 0;
5311
ad8d3948 5312 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5313 factor = 120;
ad8d3948
EG
5314 else if (CHIP_REV_IS_EMUL(bp))
5315 factor = 200;
5316 else
a2fbb9ea 5317 factor = 1;
a2fbb9ea
ET
5318
5319 DP(NETIF_MSG_HW, "start part1\n");
5320
5321 /* Disable inputs of parser neighbor blocks */
5322 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5323 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5324 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5325 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5326
5327 /* Write 0 to parser credits for CFC search request */
5328 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5329
5330 /* send Ethernet packet */
5331 bnx2x_lb_pckt(bp);
5332
 5333 /* TODO: do I reset the NIG statistic? */
5334 /* Wait until NIG register shows 1 packet of size 0x10 */
5335 count = 1000 * factor;
5336 while (count) {
34f80b04 5337
a2fbb9ea
ET
5338 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5339 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5340 if (val == 0x10)
5341 break;
5342
5343 msleep(10);
5344 count--;
5345 }
5346 if (val != 0x10) {
5347 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5348 return -1;
5349 }
5350
5351 /* Wait until PRS register shows 1 packet */
5352 count = 1000 * factor;
5353 while (count) {
5354 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5355 if (val == 1)
5356 break;
5357
5358 msleep(10);
5359 count--;
5360 }
5361 if (val != 0x1) {
5362 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5363 return -2;
5364 }
5365
5366 /* Reset and init BRB, PRS */
34f80b04 5367 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5368 msleep(50);
34f80b04 5369 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5370 msleep(50);
5371 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5372 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5373
5374 DP(NETIF_MSG_HW, "part2\n");
5375
5376 /* Disable inputs of parser neighbor blocks */
5377 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5378 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5379 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5380 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5381
5382 /* Write 0 to parser credits for CFC search request */
5383 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5384
5385 /* send 10 Ethernet packets */
5386 for (i = 0; i < 10; i++)
5387 bnx2x_lb_pckt(bp);
5388
5389 /* Wait until NIG register shows 10 + 1
5390 packets of size 11*0x10 = 0xb0 */
5391 count = 1000 * factor;
5392 while (count) {
34f80b04 5393
a2fbb9ea
ET
5394 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5395 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5396 if (val == 0xb0)
5397 break;
5398
5399 msleep(10);
5400 count--;
5401 }
5402 if (val != 0xb0) {
5403 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5404 return -3;
5405 }
5406
5407 /* Wait until PRS register shows 2 packets */
5408 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5409 if (val != 2)
5410 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5411
5412 /* Write 1 to parser credits for CFC search request */
5413 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5414
5415 /* Wait until PRS register shows 3 packets */
5416 msleep(10 * factor);
5418 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419 if (val != 3)
5420 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5421
5422 /* clear NIG EOP FIFO */
5423 for (i = 0; i < 11; i++)
5424 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5425 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5426 if (val != 1) {
5427 BNX2X_ERR("clear of NIG failed\n");
5428 return -4;
5429 }
5430
5431 /* Reset and init BRB, PRS, NIG */
5432 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5433 msleep(50);
5434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5435 msleep(50);
5436 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5437 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5438#ifndef BCM_ISCSI
5439 /* set NIC mode */
5440 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5441#endif
5442
5443 /* Enable inputs of parser neighbor blocks */
5444 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5445 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5446 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5447 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5448
5449 DP(NETIF_MSG_HW, "done\n");
5450
5451 return 0; /* OK */
5452}
5453
5454static void enable_blocks_attention(struct bnx2x *bp)
5455{
5456 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5457 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5458 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5459 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5460 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5461 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5462 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5463 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5464 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5465/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5466/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5467 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5468 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5469 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5470/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5471/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5472 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5473 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5474 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5475 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5476/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5477/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5478 if (CHIP_REV_IS_FPGA(bp))
5479 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5480 else
5481 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5482 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5483 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5484 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5485/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5486/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5487 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5488 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5489/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5490 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5491}
5492
34f80b04 5493
81f75bbf
EG
5494static void bnx2x_reset_common(struct bnx2x *bp)
5495{
5496 /* reset_common */
5497 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5498 0xd3ffff7f);
5499 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5500}
5501
34f80b04 5502static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5503{
a2fbb9ea 5504 u32 val, i;
a2fbb9ea 5505
34f80b04 5506 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5507
81f75bbf 5508 bnx2x_reset_common(bp);
34f80b04
EG
5509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5510 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5511
34f80b04
EG
5512 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5513 if (CHIP_IS_E1H(bp))
5514 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5515
34f80b04
EG
5516 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5517 msleep(30);
5518 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5519
34f80b04
EG
5520 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5521 if (CHIP_IS_E1(bp)) {
5522 /* enable HW interrupt from PXP on USDM overflow
5523 bit 16 on INT_MASK_0 */
5524 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5525 }
a2fbb9ea 5526
34f80b04
EG
5527 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5528 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5529
5530#ifdef __BIG_ENDIAN
34f80b04
EG
5531 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5532 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5533 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5534 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5535 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5536 /* make sure this value is 0 */
5537 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5538
5539/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5540 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5541 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5542 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5544#endif
5545
34f80b04 5546 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5547#ifdef BCM_ISCSI
34f80b04
EG
5548 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5549 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5550 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5551#endif
5552
34f80b04
EG
5553 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5554 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5555
34f80b04
EG
 5556 /* let the HW do its magic ... */
5557 msleep(100);
5558 /* finish PXP init */
5559 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5560 if (val != 1) {
5561 BNX2X_ERR("PXP2 CFG failed\n");
5562 return -EBUSY;
5563 }
5564 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5565 if (val != 1) {
5566 BNX2X_ERR("PXP2 RD_INIT failed\n");
5567 return -EBUSY;
5568 }
a2fbb9ea 5569
34f80b04
EG
5570 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5571 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5572
34f80b04 5573 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5574
34f80b04
EG
5575 /* clean the DMAE memory */
5576 bp->dmae_ready = 1;
5577 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5578
34f80b04
EG
5579 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5580 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5581 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5582 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5583
34f80b04
EG
5584 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5585 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5587 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5588
5589 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5590 /* soft reset pulse */
5591 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5592 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5593
5594#ifdef BCM_ISCSI
34f80b04 5595 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5596#endif
a2fbb9ea 5597
34f80b04
EG
5598 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5599 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5600 if (!CHIP_REV_IS_SLOW(bp)) {
5601 /* enable hw interrupt from doorbell Q */
5602 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5603 }
a2fbb9ea 5604
34f80b04 5605 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5606 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5607 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5608 /* set NIC mode */
5609 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5610 if (CHIP_IS_E1H(bp))
5611 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5612
34f80b04
EG
5613 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5614 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5615 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5616 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5617
490c3c9b
EG
5618 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5619 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5622
34f80b04
EG
5623 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5624 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5625 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5626 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5627
34f80b04
EG
5628 /* sync semi rtc */
5629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5630 0x80000000);
5631 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5632 0x80000000);
a2fbb9ea 5633
34f80b04
EG
5634 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5635 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5636 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5637
34f80b04
EG
5638 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5639 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5640 REG_WR(bp, i, 0xc0cac01a);
5641 /* TODO: replace with something meaningful */
5642 }
8d9c5f34 5643 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5644 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5645
34f80b04
EG
5646 if (sizeof(union cdu_context) != 1024)
5647 /* we currently assume that a context is 1024 bytes */
5648 printk(KERN_ALERT PFX "please adjust the size of"
5649 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5650
34f80b04
EG
5651 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5652 val = (4 << 24) + (0 << 12) + 1024;
5653 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5654 if (CHIP_IS_E1(bp)) {
 5655 /* !!! fix pxp client credit until excel update */
5656 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5657 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5658 }
a2fbb9ea 5659
34f80b04
EG
5660 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5661 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5662 /* enable context validation interrupt from CFC */
5663 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5664
5665 /* set the thresholds to prevent CFC/CDU race */
5666 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5667
34f80b04
EG
5668 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5669 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5670
34f80b04
EG
5671 /* PXPCS COMMON comes here */
5672 /* Reset PCIE errors for debug */
5673 REG_WR(bp, 0x2814, 0xffffffff);
5674 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5675
34f80b04
EG
5676 /* EMAC0 COMMON comes here */
5677 /* EMAC1 COMMON comes here */
5678 /* DBU COMMON comes here */
5679 /* DBG COMMON comes here */
5680
5681 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5682 if (CHIP_IS_E1H(bp)) {
5683 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5684 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5685 }
5686
5687 if (CHIP_REV_IS_SLOW(bp))
5688 msleep(200);
5689
5690 /* finish CFC init */
5691 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5692 if (val != 1) {
5693 BNX2X_ERR("CFC LL_INIT failed\n");
5694 return -EBUSY;
5695 }
5696 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5697 if (val != 1) {
5698 BNX2X_ERR("CFC AC_INIT failed\n");
5699 return -EBUSY;
5700 }
5701 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5702 if (val != 1) {
5703 BNX2X_ERR("CFC CAM_INIT failed\n");
5704 return -EBUSY;
5705 }
5706 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5707
34f80b04
EG
5708 /* read NIG statistic
 5709 to see if this is our first load since power-up */
5710 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5711 val = *bnx2x_sp(bp, wb_data[0]);
5712
5713 /* do internal memory self test */
5714 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5715 BNX2X_ERR("internal mem self test failed\n");
5716 return -EBUSY;
5717 }
5718
35b19ba5 5719 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5723 bp->port.need_hw_lock = 1;
5724 break;
5725
35b19ba5 5726 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
34f80b04
EG
5727 /* Fan failure is indicated by SPIO 5 */
5728 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5729 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5730
5731 /* set to active low mode */
5732 val = REG_RD(bp, MISC_REG_SPIO_INT);
5733 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5734 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5735 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5736
34f80b04
EG
5737 /* enable interrupt to signal the IGU */
5738 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5739 val |= (1 << MISC_REGISTERS_SPIO_5);
5740 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5741 break;
f1410647 5742
34f80b04
EG
5743 default:
5744 break;
5745 }
f1410647 5746
34f80b04
EG
5747 /* clear PXP2 attentions */
5748 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5749
34f80b04 5750 enable_blocks_attention(bp);
a2fbb9ea 5751
6bbca910
YR
5752 if (!BP_NOMCP(bp)) {
5753 bnx2x_acquire_phy_lock(bp);
5754 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5755 bnx2x_release_phy_lock(bp);
5756 } else
5757 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5758
34f80b04
EG
5759 return 0;
5760}
a2fbb9ea 5761
34f80b04
EG
5762static int bnx2x_init_port(struct bnx2x *bp)
5763{
5764 int port = BP_PORT(bp);
1c06328c 5765 u32 low, high;
34f80b04 5766 u32 val;
a2fbb9ea 5767
34f80b04
EG
5768 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5769
5770 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5771
5772 /* Port PXP comes here */
5773 /* Port PXP2 comes here */
a2fbb9ea
ET
5774#ifdef BCM_ISCSI
5775 /* Port0 1
5776 * Port1 385 */
5777 i++;
5778 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5779 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5780 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5781 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5782
5783 /* Port0 2
5784 * Port1 386 */
5785 i++;
5786 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5787 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5788 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5789 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5790
5791 /* Port0 3
5792 * Port1 387 */
5793 i++;
5794 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5795 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5796 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5797 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5798#endif
34f80b04 5799 /* Port CMs come here */
8d9c5f34
EG
5800 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5801 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5802
5803 /* Port QM comes here */
a2fbb9ea
ET
5804#ifdef BCM_ISCSI
5805 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5806 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5807
5808 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5809 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5810#endif
5811 /* Port DQ comes here */
1c06328c
EG
5812
5813 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5814 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5815 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5816 /* no pause for emulation and FPGA */
5817 low = 0;
5818 high = 513;
5819 } else {
5820 if (IS_E1HMF(bp))
5821 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5822 else if (bp->dev->mtu > 4096) {
5823 if (bp->flags & ONE_PORT_FLAG)
5824 low = 160;
5825 else {
5826 val = bp->dev->mtu;
5827 /* (24*1024 + val*4)/256 */
5828 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5829 }
5830 } else
5831 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5832 high = low + 56; /* 14*1024/256 */
5833 }
5834 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5835 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
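/* For illustration (hypothetical config): a single-function two-port
 * board with MTU 9000 takes the mtu > 4096 branch above, giving
 * low == 96 + 9000/64 + 1 == 237 and high == 237 + 56 == 293
 * (thresholds in 256-byte BRB block units, per the derivation comment).
 */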
5836
5837
ad8d3948 5838 /* Port PRS comes here */
a2fbb9ea
ET
5839 /* Port TSDM comes here */
5840 /* Port CSDM comes here */
5841 /* Port USDM comes here */
5842 /* Port XSDM comes here */
356e2385 5843
34f80b04
EG
5844 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5845 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5846 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5847 port ? USEM_PORT1_END : USEM_PORT0_END);
5848 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5849 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5850 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5851 port ? XSEM_PORT1_END : XSEM_PORT0_END);
356e2385 5852
a2fbb9ea 5853 /* Port UPB comes here */
34f80b04
EG
5854 /* Port XPB comes here */
5855
5856 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5857 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5858
5859 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5860 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5861
5862 /* update threshold */
34f80b04 5863 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5864 /* update init credit */
34f80b04 5865 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
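/* i.e. (9040/16) == 565, so the initial credit is 565 + 553 - 22 ==
 * 1096 units of 16 bytes - enough for the 9040-byte threshold above
 * plus extra allowance (the 553 - 22 adjustment is not documented here).
 */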
a2fbb9ea
ET
5866
5867 /* probe changes */
34f80b04 5868 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5869 msleep(5);
34f80b04 5870 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5871
5872#ifdef BCM_ISCSI
5873 /* tell the searcher where the T2 table is */
5874 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5875
5876 wb_write[0] = U64_LO(bp->t2_mapping);
5877 wb_write[1] = U64_HI(bp->t2_mapping);
5878 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5879 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5880 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5881 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5882
5883 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5884 /* Port SRCH comes here */
5885#endif
5886 /* Port CDU comes here */
5887 /* Port CFC comes here */
34f80b04
EG
5888
5889 if (CHIP_IS_E1(bp)) {
5890 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5891 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5892 }
5893 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5894 port ? HC_PORT1_END : HC_PORT0_END);
5895
5896 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5897 MISC_AEU_PORT0_START,
34f80b04
EG
5898 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5899 /* init aeu_mask_attn_func_0/1:
5900 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5901 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5902 * bits 4-7 are used for "per vn group attention" */
5903 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5904 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5905
a2fbb9ea
ET
5906 /* Port PXPCS comes here */
5907 /* Port EMAC0 comes here */
5908 /* Port EMAC1 comes here */
5909 /* Port DBU comes here */
5910 /* Port DBG comes here */
356e2385 5911
34f80b04
EG
5912 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5913 port ? NIG_PORT1_END : NIG_PORT0_END);
5914
5915 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5916
5917 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5918 /* 0x2 disable e1hov, 0x1 enable */
5919 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5920 (IS_E1HMF(bp) ? 0x1 : 0x2));
5921
1c06328c
EG
5922 /* support pause requests from USDM, TSDM and BRB */
5923 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5924
5925 {
5926 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5927 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5928 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5929 }
34f80b04
EG
5930 }
5931
a2fbb9ea
ET
5932 /* Port MCP comes here */
5933 /* Port DMAE comes here */
5934
35b19ba5 5935 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
5936 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5937 {
5938 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5939
5940 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5941 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5942
5943 /* The GPIO should be swapped if the swap register is
5944 set and active */
5945 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5946 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5947
5948 /* Select function upon port-swap configuration */
5949 if (port == 0) {
5950 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5951 aeu_gpio_mask = (swap_val && swap_override) ?
5952 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5954 } else {
5955 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5956 aeu_gpio_mask = (swap_val && swap_override) ?
5957 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5958 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5959 }
5960 val = REG_RD(bp, offset);
5961 /* add GPIO3 to group */
5962 val |= aeu_gpio_mask;
5963 REG_WR(bp, offset, val);
5964 }
5965 break;
5966
35b19ba5 5967 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
5968 /* add SPIO 5 to group 0 */
5969 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5970 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5971 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5972 break;
5973
5974 default:
5975 break;
5976 }
5977
c18487ee 5978 bnx2x__link_reset(bp);
a2fbb9ea 5979
34f80b04
EG
5980 return 0;
5981}
5982
5983#define ILT_PER_FUNC (768/2)
5984#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
 5985/* the phys address is shifted right 12 bits and a 1=valid bit is
 5986 added at the 53rd bit;
 5987 then, since this is a wide register(TM),
 5988 we split it into two 32-bit writes
5989 */
5990#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5991#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5992#define PXP_ONE_ILT(x) (((x) << 10) | x)
5993#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
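/* For illustration, with a hypothetical DMA address 0x123456000:
 * ONCHIP_ADDR1() == 0x00123456 (addr >> 12, low 32 bits) and
 * ONCHIP_ADDR2() == 0x00100000 (the 1 << 20 valid bit; addr bits 44+
 * are zero here), which together form the 64-bit ILT entry.
 */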
5994
5995#define CNIC_ILT_LINES 0
5996
5997static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5998{
5999 int reg;
6000
6001 if (CHIP_IS_E1H(bp))
6002 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6003 else /* E1 */
6004 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6005
6006 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6007}
6008
6009static int bnx2x_init_func(struct bnx2x *bp)
6010{
6011 int port = BP_PORT(bp);
6012 int func = BP_FUNC(bp);
8badd27a 6013 u32 addr, val;
34f80b04
EG
6014 int i;
6015
6016 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6017
8badd27a
EG
6018 /* set MSI reconfigure capability */
6019 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6020 val = REG_RD(bp, addr);
6021 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6022 REG_WR(bp, addr, val);
6023
34f80b04
EG
6024 i = FUNC_ILT_BASE(func);
6025
6026 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6027 if (CHIP_IS_E1H(bp)) {
6028 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6029 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6030 } else /* E1 */
6031 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6032 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6033
6034
6035 if (CHIP_IS_E1H(bp)) {
6036 for (i = 0; i < 9; i++)
6037 bnx2x_init_block(bp,
6038 cm_start[func][i], cm_end[func][i]);
6039
6040 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6041 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6042 }
6043
6044 /* HC init per function */
6045 if (CHIP_IS_E1H(bp)) {
6046 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6047
6048 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6049 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6050 }
6051 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6052
c14423fe 6053 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6054 REG_WR(bp, 0x2114, 0xffffffff);
6055 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6056
34f80b04
EG
6057 return 0;
6058}
6059
6060static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6061{
6062 int i, rc = 0;
a2fbb9ea 6063
34f80b04
EG
6064 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6065 BP_FUNC(bp), load_code);
a2fbb9ea 6066
34f80b04
EG
6067 bp->dmae_ready = 0;
6068 mutex_init(&bp->dmae_mutex);
6069 bnx2x_gunzip_init(bp);
a2fbb9ea 6070
34f80b04
EG
6071 switch (load_code) {
6072 case FW_MSG_CODE_DRV_LOAD_COMMON:
6073 rc = bnx2x_init_common(bp);
6074 if (rc)
6075 goto init_hw_err;
6076 /* no break */
6077
6078 case FW_MSG_CODE_DRV_LOAD_PORT:
6079 bp->dmae_ready = 1;
6080 rc = bnx2x_init_port(bp);
6081 if (rc)
6082 goto init_hw_err;
6083 /* no break */
6084
6085 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6086 bp->dmae_ready = 1;
6087 rc = bnx2x_init_func(bp);
6088 if (rc)
6089 goto init_hw_err;
6090 break;
6091
6092 default:
6093 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6094 break;
6095 }
6096
6097 if (!BP_NOMCP(bp)) {
6098 int func = BP_FUNC(bp);
a2fbb9ea
ET
6099
6100 bp->fw_drv_pulse_wr_seq =
34f80b04 6101 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6102 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6103 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6104 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6105 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6106 } else
6107 bp->func_stx = 0;
a2fbb9ea 6108
34f80b04
EG
6109 /* this needs to be done before gunzip end */
6110 bnx2x_zero_def_sb(bp);
6111 for_each_queue(bp, i)
6112 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6113
6114init_hw_err:
6115 bnx2x_gunzip_end(bp);
6116
6117 return rc;
a2fbb9ea
ET
6118}
6119
c14423fe 6120/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6121static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6122{
34f80b04 6123 int func = BP_FUNC(bp);
f1410647
ET
6124 u32 seq = ++bp->fw_seq;
6125 u32 rc = 0;
19680c48
EG
6126 u32 cnt = 1;
6127 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6128
34f80b04 6129 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6130 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6131
19680c48
EG
6132 do {
 6133 /* let the FW do its magic ... */
6134 msleep(delay);
a2fbb9ea 6135
19680c48 6136 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6137
19680c48
EG
 6138 /* Give the FW up to 2 seconds (200*10ms) */
6139 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6140
6141 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6142 cnt*delay, rc, seq);
a2fbb9ea
ET
6143
6144 /* is this a reply to our command? */
6145 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6146 rc &= FW_MSG_CODE_MASK;
f1410647 6147
a2fbb9ea
ET
6148 } else {
6149 /* FW BUG! */
6150 BNX2X_ERR("FW failed to respond!\n");
6151 bnx2x_fw_dump(bp);
6152 rc = 0;
6153 }
f1410647 6154
a2fbb9ea
ET
6155 return rc;
6156}
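/* The handshake above, with hypothetical values: seq 0x0005 ORed into
 * command 0x10000000 is written as 0x10000005 to drv_mb_header; the FW
 * reply 0x20010005 read from fw_mb_header matches on the low sequence
 * bits, and (rc & FW_MSG_CODE_MASK) leaves the code 0x20010000.
 */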
6157
6158static void bnx2x_free_mem(struct bnx2x *bp)
6159{
6160
6161#define BNX2X_PCI_FREE(x, y, size) \
6162 do { \
6163 if (x) { \
6164 pci_free_consistent(bp->pdev, size, x, y); \
6165 x = NULL; \
6166 y = 0; \
6167 } \
6168 } while (0)
6169
6170#define BNX2X_FREE(x) \
6171 do { \
6172 if (x) { \
6173 vfree(x); \
6174 x = NULL; \
6175 } \
6176 } while (0)
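/* Both helpers use the do { ... } while (0) idiom so each expands to a
 * single statement that is safe in unbraced if/else bodies, and both
 * NULL the pointer afterwards so a repeated free is harmless.
 */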
6177
6178 int i;
6179
6180 /* fastpath */
555f6c78 6181 /* Common */
a2fbb9ea
ET
6182 for_each_queue(bp, i) {
6183
555f6c78 6184 /* status blocks */
a2fbb9ea
ET
6185 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6186 bnx2x_fp(bp, i, status_blk_mapping),
6187 sizeof(struct host_status_block) +
6188 sizeof(struct eth_tx_db_data));
555f6c78
EG
6189 }
6190 /* Rx */
6191 for_each_rx_queue(bp, i) {
a2fbb9ea 6192
555f6c78 6193 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6194 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6196 bnx2x_fp(bp, i, rx_desc_mapping),
6197 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6198
6199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6200 bnx2x_fp(bp, i, rx_comp_mapping),
6201 sizeof(struct eth_fast_path_rx_cqe) *
6202 NUM_RCQ_BD);
a2fbb9ea 6203
7a9b2557 6204 /* SGE ring */
32626230 6205 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6206 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6207 bnx2x_fp(bp, i, rx_sge_mapping),
6208 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6209 }
555f6c78
EG
6210 /* Tx */
6211 for_each_tx_queue(bp, i) {
6212
6213 /* fastpath tx rings: tx_buf tx_desc */
6214 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6215 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6216 bnx2x_fp(bp, i, tx_desc_mapping),
6217 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6218 }
a2fbb9ea
ET
6219 /* end of fastpath */
6220
6221 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6222 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6223
6224 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6225 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6226
6227#ifdef BCM_ISCSI
6228 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6229 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6230 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6231 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6232#endif
7a9b2557 6233 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6234
6235#undef BNX2X_PCI_FREE
 6236#undef BNX2X_FREE
6237}
6238
6239static int bnx2x_alloc_mem(struct bnx2x *bp)
6240{
6241
6242#define BNX2X_PCI_ALLOC(x, y, size) \
6243 do { \
6244 x = pci_alloc_consistent(bp->pdev, size, y); \
6245 if (x == NULL) \
6246 goto alloc_mem_err; \
6247 memset(x, 0, size); \
6248 } while (0)
6249
6250#define BNX2X_ALLOC(x, size) \
6251 do { \
6252 x = vmalloc(size); \
6253 if (x == NULL) \
6254 goto alloc_mem_err; \
6255 memset(x, 0, size); \
6256 } while (0)
6257
6258 int i;
6259
6260 /* fastpath */
555f6c78 6261 /* Common */
a2fbb9ea
ET
6262 for_each_queue(bp, i) {
6263 bnx2x_fp(bp, i, bp) = bp;
6264
555f6c78 6265 /* status blocks */
a2fbb9ea
ET
6266 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6267 &bnx2x_fp(bp, i, status_blk_mapping),
6268 sizeof(struct host_status_block) +
6269 sizeof(struct eth_tx_db_data));
555f6c78
EG
6270 }
6271 /* Rx */
6272 for_each_rx_queue(bp, i) {
a2fbb9ea 6273
555f6c78 6274 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6275 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6276 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6277 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6278 &bnx2x_fp(bp, i, rx_desc_mapping),
6279 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6280
6281 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6282 &bnx2x_fp(bp, i, rx_comp_mapping),
6283 sizeof(struct eth_fast_path_rx_cqe) *
6284 NUM_RCQ_BD);
6285
7a9b2557
VZ
6286 /* SGE ring */
6287 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6288 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6289 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6290 &bnx2x_fp(bp, i, rx_sge_mapping),
6291 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6292 }
555f6c78
EG
6293 /* Tx */
6294 for_each_tx_queue(bp, i) {
6295
6296 bnx2x_fp(bp, i, hw_tx_prods) =
6297 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6298
6299 bnx2x_fp(bp, i, tx_prods_mapping) =
6300 bnx2x_fp(bp, i, status_blk_mapping) +
6301 sizeof(struct host_status_block);
6302
6303 /* fastpath tx rings: tx_buf tx_desc */
6304 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6305 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6306 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6307 &bnx2x_fp(bp, i, tx_desc_mapping),
6308 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6309 }
a2fbb9ea
ET
6310 /* end of fastpath */
6311
6312 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6313 sizeof(struct host_def_status_block));
6314
6315 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6316 sizeof(struct bnx2x_slowpath));
6317
6318#ifdef BCM_ISCSI
6319 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6320
6321 /* Initialize T1 */
6322 for (i = 0; i < 64*1024; i += 64) {
6323 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6324 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6325 }
6326
6327 /* allocate searcher T2 table
6328 we allocate 1/4 of alloc num for T2
6329 (which is not entered into the ILT) */
6330 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6331
6332 /* Initialize T2 */
6333 for (i = 0; i < 16*1024; i += 64)
6334 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6335
c14423fe 6336 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6337 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6338
6339 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6340 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6341
6342 /* QM queues (128*MAX_CONN) */
6343 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6344#endif
6345
6346 /* Slow path ring */
6347 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6348
6349 return 0;
6350
6351alloc_mem_err:
6352 bnx2x_free_mem(bp);
6353 return -ENOMEM;
6354
6355#undef BNX2X_PCI_ALLOC
6356#undef BNX2X_ALLOC
6357}
6358
6359static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6360{
6361 int i;
6362
555f6c78 6363 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6364 struct bnx2x_fastpath *fp = &bp->fp[i];
6365
6366 u16 bd_cons = fp->tx_bd_cons;
6367 u16 sw_prod = fp->tx_pkt_prod;
6368 u16 sw_cons = fp->tx_pkt_cons;
6369
a2fbb9ea
ET
6370 while (sw_cons != sw_prod) {
6371 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6372 sw_cons++;
6373 }
6374 }
6375}
6376
6377static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6378{
6379 int i, j;
6380
555f6c78 6381 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6382 struct bnx2x_fastpath *fp = &bp->fp[j];
6383
a2fbb9ea
ET
6384 for (i = 0; i < NUM_RX_BD; i++) {
6385 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6386 struct sk_buff *skb = rx_buf->skb;
6387
6388 if (skb == NULL)
6389 continue;
6390
6391 pci_unmap_single(bp->pdev,
6392 pci_unmap_addr(rx_buf, mapping),
356e2385 6393 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6394
6395 rx_buf->skb = NULL;
6396 dev_kfree_skb(skb);
6397 }
7a9b2557 6398 if (!fp->disable_tpa)
32626230
EG
6399 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6400 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6401 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6402 }
6403}
6404
6405static void bnx2x_free_skbs(struct bnx2x *bp)
6406{
6407 bnx2x_free_tx_skbs(bp);
6408 bnx2x_free_rx_skbs(bp);
6409}
6410
6411static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6412{
34f80b04 6413 int i, offset = 1;
a2fbb9ea
ET
6414
6415 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6416 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6417 bp->msix_table[0].vector);
6418
6419 for_each_queue(bp, i) {
c14423fe 6420 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6421 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6422 bnx2x_fp(bp, i, state));
6423
34f80b04 6424 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6425 }
a2fbb9ea
ET
6426}
6427
6428static void bnx2x_free_irq(struct bnx2x *bp)
6429{
a2fbb9ea 6430 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6431 bnx2x_free_msix_irqs(bp);
6432 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6433 bp->flags &= ~USING_MSIX_FLAG;
6434
8badd27a
EG
6435 } else if (bp->flags & USING_MSI_FLAG) {
6436 free_irq(bp->pdev->irq, bp->dev);
6437 pci_disable_msi(bp->pdev);
6438 bp->flags &= ~USING_MSI_FLAG;
6439
a2fbb9ea
ET
6440 } else
6441 free_irq(bp->pdev->irq, bp->dev);
6442}
6443
6444static int bnx2x_enable_msix(struct bnx2x *bp)
6445{
8badd27a
EG
6446 int i, rc, offset = 1;
6447 int igu_vec = 0;
a2fbb9ea 6448
8badd27a
EG
6449 bp->msix_table[0].entry = igu_vec;
6450 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6451
34f80b04 6452 for_each_queue(bp, i) {
8badd27a 6453 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6454 bp->msix_table[i + offset].entry = igu_vec;
6455 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6456 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6457 }
6458
34f80b04 6459 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6460 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6461 if (rc) {
8badd27a
EG
6462 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6463 return rc;
34f80b04 6464 }
8badd27a 6465
a2fbb9ea
ET
6466 bp->flags |= USING_MSIX_FLAG;
6467
6468 return 0;
a2fbb9ea
ET
6469}
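/* Resulting vector layout (offset == 1): msix_table[0] serves the
 * slowpath status block and msix_table[1..BNX2X_NUM_QUEUES(bp)] the
 * fastpath queues, each mapped to IGU vector BP_L_ID(bp) + 1 + i.
 */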
6470
a2fbb9ea
ET
6471static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6472{
34f80b04 6473 int i, rc, offset = 1;
a2fbb9ea 6474
a2fbb9ea
ET
6475 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6476 bp->dev->name, bp->dev);
a2fbb9ea
ET
6477 if (rc) {
6478 BNX2X_ERR("request sp irq failed\n");
6479 return -EBUSY;
6480 }
6481
6482 for_each_queue(bp, i) {
555f6c78
EG
6483 struct bnx2x_fastpath *fp = &bp->fp[i];
6484
6485 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6486 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6487 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6488 if (rc) {
555f6c78 6489 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6490 bnx2x_free_msix_irqs(bp);
6491 return -EBUSY;
6492 }
6493
555f6c78 6494 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6495 }
6496
555f6c78
EG
6497 i = BNX2X_NUM_QUEUES(bp);
6498 if (is_multi(bp))
6499 printk(KERN_INFO PFX
6500 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6501 bp->dev->name, bp->msix_table[0].vector,
6502 bp->msix_table[offset].vector,
6503 bp->msix_table[offset + i - 1].vector);
6504 else
6505 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6506 bp->dev->name, bp->msix_table[0].vector,
6507 bp->msix_table[offset + i - 1].vector);
6508
a2fbb9ea 6509 return 0;
a2fbb9ea
ET
6510}
6511
8badd27a
EG
6512static int bnx2x_enable_msi(struct bnx2x *bp)
6513{
6514 int rc;
6515
6516 rc = pci_enable_msi(bp->pdev);
6517 if (rc) {
6518 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6519 return -1;
6520 }
6521 bp->flags |= USING_MSI_FLAG;
6522
6523 return 0;
6524}
6525
a2fbb9ea
ET
6526static int bnx2x_req_irq(struct bnx2x *bp)
6527{
8badd27a 6528 unsigned long flags;
34f80b04 6529 int rc;
a2fbb9ea 6530
8badd27a
EG
6531 if (bp->flags & USING_MSI_FLAG)
6532 flags = 0;
6533 else
6534 flags = IRQF_SHARED;
6535
6536 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6537 bp->dev->name, bp->dev);
a2fbb9ea
ET
6538 if (!rc)
6539 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6540
6541 return rc;
a2fbb9ea
ET
6542}
6543
65abd74d
YG
6544static void bnx2x_napi_enable(struct bnx2x *bp)
6545{
6546 int i;
6547
555f6c78 6548 for_each_rx_queue(bp, i)
65abd74d
YG
6549 napi_enable(&bnx2x_fp(bp, i, napi));
6550}
6551
6552static void bnx2x_napi_disable(struct bnx2x *bp)
6553{
6554 int i;
6555
555f6c78 6556 for_each_rx_queue(bp, i)
65abd74d
YG
6557 napi_disable(&bnx2x_fp(bp, i, napi));
6558}
6559
6560static void bnx2x_netif_start(struct bnx2x *bp)
6561{
6562 if (atomic_dec_and_test(&bp->intr_sem)) {
6563 if (netif_running(bp->dev)) {
65abd74d
YG
6564 bnx2x_napi_enable(bp);
6565 bnx2x_int_enable(bp);
555f6c78
EG
6566 if (bp->state == BNX2X_STATE_OPEN)
6567 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6568 }
6569 }
6570}
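/* Note: intr_sem is used as a nested enable gate - bnx2x_netif_stop()
 * below raises it while quiescing, and only the atomic_dec_and_test()
 * above, when the count returns to zero, re-enables NAPI, interrupts
 * and the TX queues.
 */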
6571
f8ef6e44 6572static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6573{
f8ef6e44 6574 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6575 bnx2x_napi_disable(bp);
762d5f6c
EG
6576 netif_tx_disable(bp->dev);
6577 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6578}
6579
a2fbb9ea
ET
6580/*
6581 * Init service functions
6582 */
6583
3101c2bc 6584static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6585{
6586 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6587 int port = BP_PORT(bp);
a2fbb9ea
ET
6588
6589 /* CAM allocation
6590 * unicasts 0-31:port0 32-63:port1
6591 * multicast 64-127:port0 128-191:port1
6592 */
8d9c5f34 6593 config->hdr.length = 2;
af246401 6594 config->hdr.offset = port ? 32 : 0;
0626b899 6595 config->hdr.client_id = bp->fp->cl_id;
a2fbb9ea
ET
6596 config->hdr.reserved1 = 0;
6597
6598 /* primary MAC */
6599 config->config_table[0].cam_entry.msb_mac_addr =
6600 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6601 config->config_table[0].cam_entry.middle_mac_addr =
6602 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6603 config->config_table[0].cam_entry.lsb_mac_addr =
6604 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6605 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6606 if (set)
6607 config->config_table[0].target_table_entry.flags = 0;
6608 else
6609 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6610 config->config_table[0].target_table_entry.client_id = 0;
6611 config->config_table[0].target_table_entry.vlan_id = 0;
6612
3101c2bc
YG
6613 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6614 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6615 config->config_table[0].cam_entry.msb_mac_addr,
6616 config->config_table[0].cam_entry.middle_mac_addr,
6617 config->config_table[0].cam_entry.lsb_mac_addr);
6618
6619 /* broadcast */
4781bfad
EG
6620 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6621 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6622 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6623 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6624 if (set)
6625 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6626 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6627 else
6628 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6629 config->config_table[1].target_table_entry.client_id = 0;
6630 config->config_table[1].target_table_entry.vlan_id = 0;
6631
6632 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6633 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6634 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6635}
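/* The swab16() packing above, for a hypothetical MAC 00:11:22:33:44:55
 * on a little-endian host: *(u16 *)&dev_addr[0] reads 0x1100 and
 * swab16() gives 0x0011, so the CAM entry holds msb/middle/lsb ==
 * 0x0011/0x2233/0x4455 - the address bytes in wire order.
 */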
6636
3101c2bc 6637static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6638{
6639 struct mac_configuration_cmd_e1h *config =
6640 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6641
3101c2bc 6642 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6643 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6644 return;
6645 }
6646
6647 /* CAM allocation for E1H
6648 * unicasts: by func number
6649 * multicast: 20+FUNC*20, 20 each
6650 */
8d9c5f34 6651 config->hdr.length = 1;
34f80b04 6652 config->hdr.offset = BP_FUNC(bp);
0626b899 6653 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6654 config->hdr.reserved1 = 0;
6655
6656 /* primary MAC */
6657 config->config_table[0].msb_mac_addr =
6658 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6659 config->config_table[0].middle_mac_addr =
6660 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6661 config->config_table[0].lsb_mac_addr =
6662 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6663 config->config_table[0].client_id = BP_L_ID(bp);
6664 config->config_table[0].vlan_id = 0;
6665 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6666 if (set)
6667 config->config_table[0].flags = BP_PORT(bp);
6668 else
6669 config->config_table[0].flags =
6670 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6671
3101c2bc
YG
6672 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6673 (set ? "setting" : "clearing"),
34f80b04
EG
6674 config->config_table[0].msb_mac_addr,
6675 config->config_table[0].middle_mac_addr,
6676 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6677
6678 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6679 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6680 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6681}
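/* Worked example of the E1H CAM split described above: the unicast entry
 * index is simply the function number, and function F owns the 20 multicast
 * entries starting at 20 + F*20. Function 3, for instance, would use
 * unicast entry 3 and multicast entries 80..99 (a hypothetical function
 * number, shown only to make the layout concrete). */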
6682
a2fbb9ea
ET
6683static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6684 int *state_p, int poll)
6685{
6686 /* can take a while if any port is running */
8b3a0f0b 6687 int cnt = 5000;
a2fbb9ea 6688
c14423fe
ET
6689 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6690 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6691
6692 might_sleep();
34f80b04 6693 while (cnt--) {
a2fbb9ea
ET
6694 if (poll) {
6695 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6696 /* if the index is different from 0,
6697 * the reply for some commands will
3101c2bc 6698 * be on the non-default queue

a2fbb9ea
ET
6699 */
6700 if (idx)
6701 bnx2x_rx_int(&bp->fp[idx], 10);
6702 }
a2fbb9ea 6703
3101c2bc 6704 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6705 if (*state_p == state) {
6706#ifdef BNX2X_STOP_ON_ERROR
6707 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6708#endif
a2fbb9ea 6709 return 0;
8b3a0f0b 6710 }
a2fbb9ea 6711
a2fbb9ea 6712 msleep(1);
a2fbb9ea
ET
6713 }
6714
a2fbb9ea 6715 /* timeout! */
49d66772
ET
6716 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6717 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6718#ifdef BNX2X_STOP_ON_ERROR
6719 bnx2x_panic();
6720#endif
a2fbb9ea 6721
49d66772 6722 return -EBUSY;
a2fbb9ea
ET
6723}
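/* bnx2x_wait_ramrod() is a poll-with-timeout loop: ~5000 iterations with
 * msleep(1) gives a budget of at least five seconds (usually more, since
 * msleep(1) may sleep longer). A minimal sketch of the same pattern;
 * example_poll_state() is hypothetical, not a driver API.
 */
#if 0 /* illustrative sketch only */
static int example_poll_state(int *state_p, int want, int budget_ms)
{
 while (budget_ms--) {
 mb(); /* state is written from another context */
 if (*state_p == want)
 return 0;
 msleep(1); /* may sleep longer than 1 ms */
 }
 return -EBUSY; /* timed out */
}
#endif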
6724
6725static int bnx2x_setup_leading(struct bnx2x *bp)
6726{
34f80b04 6727 int rc;
a2fbb9ea 6728
c14423fe 6729 /* reset IGU state */
34f80b04 6730 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6731
6732 /* SETUP ramrod */
6733 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6734
34f80b04
EG
6735 /* Wait for completion */
6736 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6737
34f80b04 6738 return rc;
a2fbb9ea
ET
6739}
6740
6741static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6742{
555f6c78
EG
6743 struct bnx2x_fastpath *fp = &bp->fp[index];
6744
a2fbb9ea 6745 /* reset IGU state */
555f6c78 6746 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6747
228241eb 6748 /* SETUP ramrod */
555f6c78
EG
6749 fp->state = BNX2X_FP_STATE_OPENING;
6750 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6751 fp->cl_id, 0);
a2fbb9ea
ET
6752
6753 /* Wait for completion */
6754 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6755 &(fp->state), 0);
a2fbb9ea
ET
6756}
6757
a2fbb9ea 6758static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6759
8badd27a 6760static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6761{
555f6c78 6762 int num_queues;
a2fbb9ea 6763
8badd27a
EG
6764 switch (int_mode) {
6765 case INT_MODE_INTx:
6766 case INT_MODE_MSI:
555f6c78
EG
6767 num_queues = 1;
6768 bp->num_rx_queues = num_queues;
6769 bp->num_tx_queues = num_queues;
6770 DP(NETIF_MSG_IFUP,
6771 "set number of queues to %d\n", num_queues);
8badd27a
EG
6772 break;
6773
6774 case INT_MODE_MSIX:
6775 default:
555f6c78
EG
6776 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6777 num_queues = min_t(u32, num_online_cpus(),
6778 BNX2X_MAX_QUEUES(bp));
34f80b04 6779 else
555f6c78
EG
6780 num_queues = 1;
6781 bp->num_rx_queues = num_queues;
6782 bp->num_tx_queues = num_queues;
6783 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6784 " number of tx queues to %d\n",
6785 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f
EG
6786 /* if we can't use MSI-X we only need one fp,
6787 * so try to enable MSI-X with the requested number of fp's
6788 * and fall back to MSI or legacy INTx with one fp
6789 */
8badd27a 6790 if (bnx2x_enable_msix(bp)) {
34f80b04 6791 /* failed to enable MSI-X */
555f6c78
EG
6792 num_queues = 1;
6793 bp->num_rx_queues = num_queues;
6794 bp->num_tx_queues = num_queues;
6795 if (bp->multi_mode)
6796 BNX2X_ERR("Multi requested but failed to "
6797 "enable MSI-X set number of "
6798 "queues to %d\n", num_queues);
a2fbb9ea 6799 }
8badd27a 6800 break;
a2fbb9ea 6801 }
555f6c78 6802 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a
EG
6803}
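/* Worked example (hypothetical numbers): with the default MSI-X mode,
 * multi_mode == ETH_RSS_MODE_REGULAR and 16 online CPUs, num_queues is
 * min(16, BNX2X_MAX_QUEUES(bp)); if MSI-X allocation then fails, the
 * driver falls back to a single queue (logging an error when multi was
 * requested) and later tries MSI or legacy INTx. */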
6804
6805static void bnx2x_set_rx_mode(struct net_device *dev);
6806
6807/* must be called with rtnl_lock */
6808static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6809{
6810 u32 load_code;
6811 int i, rc = 0;
6812#ifdef BNX2X_STOP_ON_ERROR
6813 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6814 if (unlikely(bp->panic))
6815 return -EPERM;
6816#endif
6817
6818 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6819
6820 bnx2x_set_int_mode(bp);
c14423fe 6821
a2fbb9ea
ET
6822 if (bnx2x_alloc_mem(bp))
6823 return -ENOMEM;
6824
555f6c78 6825 for_each_rx_queue(bp, i)
7a9b2557
VZ
6826 bnx2x_fp(bp, i, disable_tpa) =
6827 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6828
555f6c78 6829 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6830 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6831 bnx2x_poll, 128);
6832
6833#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6834 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6835 struct bnx2x_fastpath *fp = &bp->fp[i];
6836
6837 fp->poll_no_work = 0;
6838 fp->poll_calls = 0;
6839 fp->poll_max_calls = 0;
6840 fp->poll_complete = 0;
6841 fp->poll_exit = 0;
6842 }
6843#endif
6844 bnx2x_napi_enable(bp);
6845
34f80b04
EG
6846 if (bp->flags & USING_MSIX_FLAG) {
6847 rc = bnx2x_req_msix_irqs(bp);
6848 if (rc) {
6849 pci_disable_msix(bp->pdev);
2dfe0e1f 6850 goto load_error1;
34f80b04
EG
6851 }
6852 } else {
8badd27a
EG
6853 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6854 bnx2x_enable_msi(bp);
34f80b04
EG
6855 bnx2x_ack_int(bp);
6856 rc = bnx2x_req_irq(bp);
6857 if (rc) {
2dfe0e1f 6858 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6859 if (bp->flags & USING_MSI_FLAG)
6860 pci_disable_msi(bp->pdev);
2dfe0e1f 6861 goto load_error1;
a2fbb9ea 6862 }
8badd27a
EG
6863 if (bp->flags & USING_MSI_FLAG) {
6864 bp->dev->irq = bp->pdev->irq;
6865 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6866 bp->dev->name, bp->pdev->irq);
6867 }
a2fbb9ea
ET
6868 }
6869
2dfe0e1f
EG
6870 /* Send LOAD_REQUEST command to MCP.
6871 Returns the type of LOAD command:
6872 if this is the first port to be initialized,
6873 common blocks should be initialized, otherwise not
6874 */
6875 if (!BP_NOMCP(bp)) {
6876 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6877 if (!load_code) {
6878 BNX2X_ERR("MCP response failure, aborting\n");
6879 rc = -EBUSY;
6880 goto load_error2;
6881 }
6882 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6883 rc = -EBUSY; /* other port in diagnostic mode */
6884 goto load_error2;
6885 }
6886
6887 } else {
6888 int port = BP_PORT(bp);
6889
f5372251 6890 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
6891 load_count[0], load_count[1], load_count[2]);
6892 load_count[0]++;
6893 load_count[1 + port]++;
f5372251 6894 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
6895 load_count[0], load_count[1], load_count[2]);
6896 if (load_count[0] == 1)
6897 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6898 else if (load_count[1 + port] == 1)
6899 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6900 else
6901 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6902 }
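/* With no MCP, load_count[] emulates the MCP's answer: the first function
 * to load on the device sees load_count[0] == 1 and gets LOAD_COMMON, the
 * first function on a given port sees load_count[1 + port] == 1 and gets
 * LOAD_PORT, and every later function gets LOAD_FUNCTION. */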
6903
6904 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6905 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6906 bp->port.pmf = 1;
6907 else
6908 bp->port.pmf = 0;
6909 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6910
a2fbb9ea 6911 /* Initialize HW */
34f80b04
EG
6912 rc = bnx2x_init_hw(bp, load_code);
6913 if (rc) {
a2fbb9ea 6914 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6915 goto load_error2;
a2fbb9ea
ET
6916 }
6917
a2fbb9ea 6918 /* Setup NIC internals and enable interrupts */
471de716 6919 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6920
6921 /* Send LOAD_DONE command to MCP */
34f80b04 6922 if (!BP_NOMCP(bp)) {
228241eb
ET
6923 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6924 if (!load_code) {
da5a662a 6925 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6926 rc = -EBUSY;
2dfe0e1f 6927 goto load_error3;
a2fbb9ea
ET
6928 }
6929 }
6930
6931 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6932
34f80b04
EG
6933 rc = bnx2x_setup_leading(bp);
6934 if (rc) {
da5a662a 6935 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6936 goto load_error3;
34f80b04 6937 }
a2fbb9ea 6938
34f80b04
EG
6939 if (CHIP_IS_E1H(bp))
6940 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 6941 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
34f80b04
EG
6942 bp->state = BNX2X_STATE_DISABLED;
6943 }
a2fbb9ea 6944
34f80b04
EG
6945 if (bp->state == BNX2X_STATE_OPEN)
6946 for_each_nondefault_queue(bp, i) {
6947 rc = bnx2x_setup_multi(bp, i);
6948 if (rc)
2dfe0e1f 6949 goto load_error3;
34f80b04 6950 }
a2fbb9ea 6951
34f80b04 6952 if (CHIP_IS_E1(bp))
3101c2bc 6953 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6954 else
3101c2bc 6955 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6956
6957 if (bp->port.pmf)
b5bf9068 6958 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
6959
6960 /* Start fast path */
34f80b04
EG
6961 switch (load_mode) {
6962 case LOAD_NORMAL:
6963 /* Tx queues should only be re-enabled */
555f6c78 6964 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6965 /* Initialize the receive filter. */
34f80b04
EG
6966 bnx2x_set_rx_mode(bp->dev);
6967 break;
6968
6969 case LOAD_OPEN:
555f6c78 6970 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6971 /* Initialize the receive filter. */
34f80b04 6972 bnx2x_set_rx_mode(bp->dev);
34f80b04 6973 break;
a2fbb9ea 6974
34f80b04 6975 case LOAD_DIAG:
2dfe0e1f 6976 /* Initialize the receive filter. */
a2fbb9ea 6977 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6978 bp->state = BNX2X_STATE_DIAG;
6979 break;
6980
6981 default:
6982 break;
a2fbb9ea
ET
6983 }
6984
34f80b04
EG
6985 if (!bp->port.pmf)
6986 bnx2x__link_status_update(bp);
6987
a2fbb9ea
ET
6988 /* start the timer */
6989 mod_timer(&bp->timer, jiffies + bp->current_interval);
6990
34f80b04 6991
a2fbb9ea
ET
6992 return 0;
6993
2dfe0e1f
EG
6994load_error3:
6995 bnx2x_int_disable_sync(bp, 1);
6996 if (!BP_NOMCP(bp)) {
6997 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6999 }
7000 bp->port.pmf = 0;
7a9b2557
VZ
7001 /* Free SKBs, SGEs, TPA pool and driver internals */
7002 bnx2x_free_skbs(bp);
555f6c78 7003 for_each_rx_queue(bp, i)
3196a88a 7004 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7005load_error2:
d1014634
YG
7006 /* Release IRQs */
7007 bnx2x_free_irq(bp);
2dfe0e1f
EG
7008load_error1:
7009 bnx2x_napi_disable(bp);
555f6c78 7010 for_each_rx_queue(bp, i)
7cde1c8b 7011 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7012 bnx2x_free_mem(bp);
7013
34f80b04 7014 return rc;
a2fbb9ea
ET
7015}
7016
7017static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7018{
555f6c78 7019 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7020 int rc;
7021
c14423fe 7022 /* halt the connection */
555f6c78
EG
7023 fp->state = BNX2X_FP_STATE_HALTING;
7024 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7025
34f80b04 7026 /* Wait for completion */
a2fbb9ea 7027 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7028 &(fp->state), 1);
c14423fe 7029 if (rc) /* timeout */
a2fbb9ea
ET
7030 return rc;
7031
7032 /* delete cfc entry */
7033 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7034
34f80b04
EG
7035 /* Wait for completion */
7036 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7037 &(fp->state), 1);
34f80b04 7038 return rc;
a2fbb9ea
ET
7039}
7040
da5a662a 7041static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7042{
4781bfad 7043 __le16 dsb_sp_prod_idx;
c14423fe 7044 /* if the other port is handling traffic,
a2fbb9ea 7045 this can take a lot of time */
34f80b04
EG
7046 int cnt = 500;
7047 int rc;
a2fbb9ea
ET
7048
7049 might_sleep();
7050
7051 /* Send HALT ramrod */
7052 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7053 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7054
34f80b04
EG
7055 /* Wait for completion */
7056 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7057 &(bp->fp[0].state), 1);
7058 if (rc) /* timeout */
da5a662a 7059 return rc;
a2fbb9ea 7060
49d66772 7061 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7062
228241eb 7063 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7064 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7065
49d66772 7066 /* Wait for completion to arrive on the default status block;
a2fbb9ea
ET
7067 we are going to reset the chip anyway,
7068 so there is not much to do if this times out
7069 */
34f80b04 7070 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7071 if (!cnt) {
7072 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7073 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7074 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7075#ifdef BNX2X_STOP_ON_ERROR
7076 bnx2x_panic();
7077#endif
36e552ab 7078 rc = -EBUSY;
34f80b04
EG
7079 break;
7080 }
7081 cnt--;
da5a662a 7082 msleep(1);
5650d9d4 7083 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7084 }
7085 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7086 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7087
7088 return rc;
a2fbb9ea
ET
7089}
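/* The PORT_DELETE completion is detected indirectly: the ramrod completion
 * advances the default status block's slow-path producer, so the loop above
 * snapshots *bp->dsb_sp_prod before posting the ramrod and polls (with
 * rmb()) until the value changes or roughly half a second elapses. */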
7090
34f80b04
EG
7091static void bnx2x_reset_func(struct bnx2x *bp)
7092{
7093 int port = BP_PORT(bp);
7094 int func = BP_FUNC(bp);
7095 int base, i;
7096
7097 /* Configure IGU */
7098 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7099 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7100
34f80b04
EG
7101 /* Clear ILT */
7102 base = FUNC_ILT_BASE(func);
7103 for (i = base; i < base + ILT_PER_FUNC; i++)
7104 bnx2x_ilt_wr(bp, i, 0);
7105}
7106
7107static void bnx2x_reset_port(struct bnx2x *bp)
7108{
7109 int port = BP_PORT(bp);
7110 u32 val;
7111
7112 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7113
7114 /* Do not rcv packets to BRB */
7115 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7116 /* Do not direct rcv packets that are not for MCP to the BRB */
7117 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7118 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7119
7120 /* Configure AEU */
7121 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7122
7123 msleep(100);
7124 /* Check for BRB port occupancy */
7125 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7126 if (val)
7127 DP(NETIF_MSG_IFDOWN,
33471629 7128 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7129
7130 /* TODO: Close Doorbell port? */
7131}
7132
34f80b04
EG
7133static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7134{
7135 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7136 BP_FUNC(bp), reset_code);
7137
7138 switch (reset_code) {
7139 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7140 bnx2x_reset_port(bp);
7141 bnx2x_reset_func(bp);
7142 bnx2x_reset_common(bp);
7143 break;
7144
7145 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7146 bnx2x_reset_port(bp);
7147 bnx2x_reset_func(bp);
7148 break;
7149
7150 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7151 bnx2x_reset_func(bp);
7152 break;
49d66772 7153
34f80b04
EG
7154 default:
7155 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7156 break;
7157 }
7158}
7159
33471629 7160/* must be called with rtnl_lock */
34f80b04 7161static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7162{
da5a662a 7163 int port = BP_PORT(bp);
a2fbb9ea 7164 u32 reset_code = 0;
da5a662a 7165 int i, cnt, rc;
a2fbb9ea
ET
7166
7167 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7168
228241eb
ET
7169 bp->rx_mode = BNX2X_RX_MODE_NONE;
7170 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7171
f8ef6e44 7172 bnx2x_netif_stop(bp, 1);
e94d8af3 7173
34f80b04
EG
7174 del_timer_sync(&bp->timer);
7175 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7176 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7177 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7178
70b9986c
EG
7179 /* Release IRQs */
7180 bnx2x_free_irq(bp);
7181
555f6c78
EG
7182 /* Wait until tx fastpath tasks complete */
7183 for_each_tx_queue(bp, i) {
228241eb
ET
7184 struct bnx2x_fastpath *fp = &bp->fp[i];
7185
34f80b04 7186 cnt = 1000;
e8b5fc51 7187 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7188
7961f791 7189 bnx2x_tx_int(fp);
34f80b04
EG
7190 if (!cnt) {
7191 BNX2X_ERR("timeout waiting for queue[%d]\n",
7192 i);
7193#ifdef BNX2X_STOP_ON_ERROR
7194 bnx2x_panic();
7195 return -EBUSY;
7196#else
7197 break;
7198#endif
7199 }
7200 cnt--;
da5a662a 7201 msleep(1);
34f80b04 7202 }
228241eb 7203 }
da5a662a
VZ
7204 /* Give HW time to discard old tx messages */
7205 msleep(1);
a2fbb9ea 7206
3101c2bc
YG
7207 if (CHIP_IS_E1(bp)) {
7208 struct mac_configuration_cmd *config =
7209 bnx2x_sp(bp, mcast_config);
7210
7211 bnx2x_set_mac_addr_e1(bp, 0);
7212
8d9c5f34 7213 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7214 CAM_INVALIDATE(config->config_table[i]);
7215
8d9c5f34 7216 config->hdr.length = i;
3101c2bc
YG
7217 if (CHIP_REV_IS_SLOW(bp))
7218 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7219 else
7220 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7221 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7222 config->hdr.reserved1 = 0;
7223
7224 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7225 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7226 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7227
7228 } else { /* E1H */
65abd74d
YG
7229 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7230
3101c2bc
YG
7231 bnx2x_set_mac_addr_e1h(bp, 0);
7232
7233 for (i = 0; i < MC_HASH_SIZE; i++)
7234 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7235 }
7236
65abd74d
YG
7237 if (unload_mode == UNLOAD_NORMAL)
7238 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7239
7240 else if (bp->flags & NO_WOL_FLAG) {
7241 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7242 if (CHIP_IS_E1H(bp))
7243 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7244
7245 } else if (bp->wol) {
7246 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7247 u8 *mac_addr = bp->dev->dev_addr;
7248 u32 val;
7249 /* The MAC address is written to entries 1-4 to
7250 preserve entry 0, which is used by the PMF */
7251 u8 entry = (BP_E1HVN(bp) + 1)*8;
7252
7253 val = (mac_addr[0] << 8) | mac_addr[1];
7254 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7255
7256 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7257 (mac_addr[4] << 8) | mac_addr[5];
7258 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7259
7260 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7261
7262 } else
7263 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
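/* Worked example (hypothetical values): for E1HVN 2 the slot above is
 * entry = (2 + 1)*8 = 24, i.e. the MAC-match pair 24 bytes past the base -
 * match entry 3, leaving entry 0 to the PMF. For mac 00:10:18:a1:b2:c3 the
 * two words written are 0x00000010 and 0x18a1b2c3. */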
da5a662a 7264
34f80b04
EG
7265 /* Close multi and leading connections.
7266 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7267 for_each_nondefault_queue(bp, i)
7268 if (bnx2x_stop_multi(bp, i))
228241eb 7269 goto unload_error;
a2fbb9ea 7270
da5a662a
VZ
7271 rc = bnx2x_stop_leading(bp);
7272 if (rc) {
34f80b04 7273 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7274#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7275 return -EBUSY;
da5a662a
VZ
7276#else
7277 goto unload_error;
34f80b04 7278#endif
228241eb
ET
7279 }
7280
7281unload_error:
34f80b04 7282 if (!BP_NOMCP(bp))
228241eb 7283 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7284 else {
f5372251 7285 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
7286 load_count[0], load_count[1], load_count[2]);
7287 load_count[0]--;
da5a662a 7288 load_count[1 + port]--;
f5372251 7289 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
7290 load_count[0], load_count[1], load_count[2]);
7291 if (load_count[0] == 0)
7292 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7293 else if (load_count[1 + port] == 0)
34f80b04
EG
7294 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7295 else
7296 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7297 }
a2fbb9ea 7298
34f80b04
EG
7299 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7300 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7301 bnx2x__link_reset(bp);
a2fbb9ea
ET
7302
7303 /* Reset the chip */
228241eb 7304 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7305
7306 /* Report UNLOAD_DONE to MCP */
34f80b04 7307 if (!BP_NOMCP(bp))
a2fbb9ea 7308 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7309
9a035440 7310 bp->port.pmf = 0;
a2fbb9ea 7311
7a9b2557 7312 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7313 bnx2x_free_skbs(bp);
555f6c78 7314 for_each_rx_queue(bp, i)
3196a88a 7315 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7316 for_each_rx_queue(bp, i)
7cde1c8b 7317 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7318 bnx2x_free_mem(bp);
7319
7320 bp->state = BNX2X_STATE_CLOSED;
228241eb 7321
a2fbb9ea
ET
7322 netif_carrier_off(bp->dev);
7323
7324 return 0;
7325}
7326
34f80b04
EG
7327static void bnx2x_reset_task(struct work_struct *work)
7328{
7329 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7330
7331#ifdef BNX2X_STOP_ON_ERROR
7332 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7333 " so reset not done to allow debug dump,\n"
7334 KERN_ERR " you will need to reboot when done\n");
7335 return;
7336#endif
7337
7338 rtnl_lock();
7339
7340 if (!netif_running(bp->dev))
7341 goto reset_task_exit;
7342
7343 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7344 bnx2x_nic_load(bp, LOAD_NORMAL);
7345
7346reset_task_exit:
7347 rtnl_unlock();
7348}
7349
a2fbb9ea
ET
7350/* end of nic load/unload */
7351
7352/* ethtool_ops */
7353
7354/*
7355 * Init service functions
7356 */
7357
f1ef27ef
EG
7358static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7359{
7360 switch (func) {
7361 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7362 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7363 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7364 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7365 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7366 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7367 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7368 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7369 default:
7370 BNX2X_ERR("Unsupported function index: %d\n", func);
7371 return (u32)(-1);
7372 }
7373}
7374
7375static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7376{
7377 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7378
7379 /* Flush all outstanding writes */
7380 mmiowb();
7381
7382 /* Pretend to be function 0 */
7383 REG_WR(bp, reg, 0);
7384 /* Flush the GRC transaction (in the chip) */
7385 new_val = REG_RD(bp, reg);
7386 if (new_val != 0) {
7387 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7388 new_val);
7389 BUG();
7390 }
7391
7392 /* From now we are in the "like-E1" mode */
7393 bnx2x_int_disable(bp);
7394
7395 /* Flush all outstanding writes */
7396 mmiowb();
7397
7398 /* Restore the original function settings */
7399 REG_WR(bp, reg, orig_func);
7400 new_val = REG_RD(bp, reg);
7401 if (new_val != orig_func) {
7402 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7403 orig_func, new_val);
7404 BUG();
7405 }
7406}
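/* The "pretend" register appears to make subsequent GRC accesses from this
 * PCI function be handled as if they came from another function: pretend to
 * be function 0, disable interrupts in that context, then restore the
 * original function, reading the register back after each write to flush
 * and verify the GRC transaction. */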
7407
7408static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7409{
7410 if (CHIP_IS_E1H(bp))
7411 bnx2x_undi_int_disable_e1h(bp, func);
7412 else
7413 bnx2x_int_disable(bp);
7414}
7415
34f80b04
EG
7416static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7417{
7418 u32 val;
7419
7420 /* Check if there is any driver already loaded */
7421 val = REG_RD(bp, MISC_REG_UNPREPARED);
7422 if (val == 0x1) {
7423 /* Check if it is the UNDI driver:
7424 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7425 */
4a37fb66 7426 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7427 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7428 if (val == 0x7) {
7429 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7430 /* save our func */
34f80b04 7431 int func = BP_FUNC(bp);
da5a662a
VZ
7432 u32 swap_en;
7433 u32 swap_val;
34f80b04 7434
b4661739
EG
7435 /* clear the UNDI indication */
7436 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7437
34f80b04
EG
7438 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7439
7440 /* try unload UNDI on port 0 */
7441 bp->func = 0;
da5a662a
VZ
7442 bp->fw_seq =
7443 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7444 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7445 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7446
7447 /* if UNDI is loaded on the other port */
7448 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7449
da5a662a
VZ
7450 /* send "DONE" for previous unload */
7451 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7452
7453 /* unload UNDI on port 1 */
34f80b04 7454 bp->func = 1;
da5a662a
VZ
7455 bp->fw_seq =
7456 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7457 DRV_MSG_SEQ_NUMBER_MASK);
7458 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7459
7460 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7461 }
7462
b4661739
EG
7463 /* now it's safe to release the lock */
7464 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7465
f1ef27ef 7466 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7467
7468 /* close input traffic and wait for it */
7469 /* Do not rcv packets to BRB */
7470 REG_WR(bp,
7471 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7472 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7473 /* Do not direct rcv packets that are not for MCP to
7474 * the BRB */
7475 REG_WR(bp,
7476 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7477 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7478 /* clear AEU */
7479 REG_WR(bp,
7480 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7481 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7482 msleep(10);
7483
7484 /* save NIG port swap info */
7485 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7486 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7487 /* reset device */
7488 REG_WR(bp,
7489 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7490 0xd3ffffff);
34f80b04
EG
7491 REG_WR(bp,
7492 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7493 0x1403);
da5a662a
VZ
7494 /* take the NIG out of reset and restore swap values */
7495 REG_WR(bp,
7496 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7497 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7498 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7499 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7500
7501 /* send unload done to the MCP */
7502 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7503
7504 /* restore our func and fw_seq */
7505 bp->func = func;
7506 bp->fw_seq =
7507 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7508 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7509
7510 } else
7511 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7512 }
7513}
7514
7515static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7516{
7517 u32 val, val2, val3, val4, id;
72ce58c3 7518 u16 pmc;
34f80b04
EG
7519
7520 /* Get the chip revision id and number. */
7521 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7522 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7523 id = ((val & 0xffff) << 16);
7524 val = REG_RD(bp, MISC_REG_CHIP_REV);
7525 id |= ((val & 0xf) << 12);
7526 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7527 id |= ((val & 0xff) << 4);
5a40e08e 7528 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7529 id |= (val & 0xf);
7530 bp->common.chip_id = id;
7531 bp->link_params.chip_id = bp->common.chip_id;
7532 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
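/* Worked example (hypothetical register values): chip num 0x164e, rev 0x1,
 * metal 0x00 and bond_id 0x0 assemble to
 * (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x0 = 0x164e1000. */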
7533
1c06328c
EG
7534 val = (REG_RD(bp, 0x2874) & 0x55);
7535 if ((bp->common.chip_id & 0x1) ||
7536 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7537 bp->flags |= ONE_PORT_FLAG;
7538 BNX2X_DEV_INFO("single port device\n");
7539 }
7540
34f80b04
EG
7541 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7542 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7543 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7544 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7545 bp->common.flash_size, bp->common.flash_size);
7546
7547 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7548 bp->link_params.shmem_base = bp->common.shmem_base;
7549 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7550
7551 if (!bp->common.shmem_base ||
7552 (bp->common.shmem_base < 0xA0000) ||
7553 (bp->common.shmem_base >= 0xC0000)) {
7554 BNX2X_DEV_INFO("MCP not active\n");
7555 bp->flags |= NO_MCP_FLAG;
7556 return;
7557 }
7558
7559 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7560 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7561 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7562 BNX2X_ERR("BAD MCP validity signature\n");
7563
7564 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7565 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7566
7567 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7568 SHARED_HW_CFG_LED_MODE_MASK) >>
7569 SHARED_HW_CFG_LED_MODE_SHIFT);
7570
c2c8b03e
EG
7571 bp->link_params.feature_config_flags = 0;
7572 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7573 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7574 bp->link_params.feature_config_flags |=
7575 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7576 else
7577 bp->link_params.feature_config_flags &=
7578 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7579
34f80b04
EG
7580 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7581 bp->common.bc_ver = val;
7582 BNX2X_DEV_INFO("bc_ver %X\n", val);
7583 if (val < BNX2X_BC_VER) {
7584 /* for now only warn;
7585 * later we might need to enforce this */
7586 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7587 " please upgrade BC\n", BNX2X_BC_VER, val);
7588 }
72ce58c3
EG
7589
7590 if (BP_E1HVN(bp) == 0) {
7591 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7592 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7593 } else {
7594 /* no WOL capability for E1HVN != 0 */
7595 bp->flags |= NO_WOL_FLAG;
7596 }
7597 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7598 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7599
7600 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7601 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7602 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7603 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7604
7605 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7606 val, val2, val3, val4);
7607}
7608
7609static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7610 u32 switch_cfg)
a2fbb9ea 7611{
34f80b04 7612 int port = BP_PORT(bp);
a2fbb9ea
ET
7613 u32 ext_phy_type;
7614
a2fbb9ea
ET
7615 switch (switch_cfg) {
7616 case SWITCH_CFG_1G:
7617 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7618
c18487ee
YR
7619 ext_phy_type =
7620 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7621 switch (ext_phy_type) {
7622 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7623 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7624 ext_phy_type);
7625
34f80b04
EG
7626 bp->port.supported |= (SUPPORTED_10baseT_Half |
7627 SUPPORTED_10baseT_Full |
7628 SUPPORTED_100baseT_Half |
7629 SUPPORTED_100baseT_Full |
7630 SUPPORTED_1000baseT_Full |
7631 SUPPORTED_2500baseX_Full |
7632 SUPPORTED_TP |
7633 SUPPORTED_FIBRE |
7634 SUPPORTED_Autoneg |
7635 SUPPORTED_Pause |
7636 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7637 break;
7638
7639 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7640 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7641 ext_phy_type);
7642
34f80b04
EG
7643 bp->port.supported |= (SUPPORTED_10baseT_Half |
7644 SUPPORTED_10baseT_Full |
7645 SUPPORTED_100baseT_Half |
7646 SUPPORTED_100baseT_Full |
7647 SUPPORTED_1000baseT_Full |
7648 SUPPORTED_TP |
7649 SUPPORTED_FIBRE |
7650 SUPPORTED_Autoneg |
7651 SUPPORTED_Pause |
7652 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7653 break;
7654
7655 default:
7656 BNX2X_ERR("NVRAM config error. "
7657 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7658 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7659 return;
7660 }
7661
34f80b04
EG
7662 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7663 port*0x10);
7664 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7665 break;
7666
7667 case SWITCH_CFG_10G:
7668 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7669
c18487ee
YR
7670 ext_phy_type =
7671 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7672 switch (ext_phy_type) {
7673 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7674 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7675 ext_phy_type);
7676
34f80b04
EG
7677 bp->port.supported |= (SUPPORTED_10baseT_Half |
7678 SUPPORTED_10baseT_Full |
7679 SUPPORTED_100baseT_Half |
7680 SUPPORTED_100baseT_Full |
7681 SUPPORTED_1000baseT_Full |
7682 SUPPORTED_2500baseX_Full |
7683 SUPPORTED_10000baseT_Full |
7684 SUPPORTED_TP |
7685 SUPPORTED_FIBRE |
7686 SUPPORTED_Autoneg |
7687 SUPPORTED_Pause |
7688 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7689 break;
7690
589abe3a
EG
7691 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7692 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7693 ext_phy_type);
f1410647 7694
34f80b04 7695 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7696 SUPPORTED_1000baseT_Full |
34f80b04 7697 SUPPORTED_FIBRE |
589abe3a 7698 SUPPORTED_Autoneg |
34f80b04
EG
7699 SUPPORTED_Pause |
7700 SUPPORTED_Asym_Pause);
f1410647
ET
7701 break;
7702
589abe3a
EG
7703 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7704 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
7705 ext_phy_type);
7706
34f80b04 7707 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7708 SUPPORTED_2500baseX_Full |
34f80b04 7709 SUPPORTED_1000baseT_Full |
589abe3a
EG
7710 SUPPORTED_FIBRE |
7711 SUPPORTED_Autoneg |
7712 SUPPORTED_Pause |
7713 SUPPORTED_Asym_Pause);
7714 break;
7715
7716 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7717 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7718 ext_phy_type);
7719
7720 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
7721 SUPPORTED_FIBRE |
7722 SUPPORTED_Pause |
7723 SUPPORTED_Asym_Pause);
f1410647
ET
7724 break;
7725
589abe3a
EG
7726 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7727 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
7728 ext_phy_type);
7729
34f80b04
EG
7730 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7731 SUPPORTED_1000baseT_Full |
7732 SUPPORTED_FIBRE |
34f80b04
EG
7733 SUPPORTED_Pause |
7734 SUPPORTED_Asym_Pause);
f1410647
ET
7735 break;
7736
589abe3a
EG
7737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7738 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
7739 ext_phy_type);
7740
34f80b04 7741 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7742 SUPPORTED_1000baseT_Full |
34f80b04 7743 SUPPORTED_Autoneg |
589abe3a 7744 SUPPORTED_FIBRE |
34f80b04
EG
7745 SUPPORTED_Pause |
7746 SUPPORTED_Asym_Pause);
c18487ee
YR
7747 break;
7748
f1410647
ET
7749 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7750 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7751 ext_phy_type);
7752
34f80b04
EG
7753 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7754 SUPPORTED_TP |
7755 SUPPORTED_Autoneg |
7756 SUPPORTED_Pause |
7757 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7758 break;
7759
28577185
EG
7760 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7761 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7762 ext_phy_type);
7763
7764 bp->port.supported |= (SUPPORTED_10baseT_Half |
7765 SUPPORTED_10baseT_Full |
7766 SUPPORTED_100baseT_Half |
7767 SUPPORTED_100baseT_Full |
7768 SUPPORTED_1000baseT_Full |
7769 SUPPORTED_10000baseT_Full |
7770 SUPPORTED_TP |
7771 SUPPORTED_Autoneg |
7772 SUPPORTED_Pause |
7773 SUPPORTED_Asym_Pause);
7774 break;
7775
c18487ee
YR
7776 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7777 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7778 bp->link_params.ext_phy_config);
7779 break;
7780
a2fbb9ea
ET
7781 default:
7782 BNX2X_ERR("NVRAM config error. "
7783 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7784 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7785 return;
7786 }
7787
34f80b04
EG
7788 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7789 port*0x18);
7790 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7791
a2fbb9ea
ET
7792 break;
7793
7794 default:
7795 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7796 bp->port.link_config);
a2fbb9ea
ET
7797 return;
7798 }
34f80b04 7799 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7800
7801 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7802 if (!(bp->link_params.speed_cap_mask &
7803 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7804 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7805
c18487ee
YR
7806 if (!(bp->link_params.speed_cap_mask &
7807 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7808 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7809
c18487ee
YR
7810 if (!(bp->link_params.speed_cap_mask &
7811 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7812 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7813
c18487ee
YR
7814 if (!(bp->link_params.speed_cap_mask &
7815 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7816 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7817
c18487ee
YR
7818 if (!(bp->link_params.speed_cap_mask &
7819 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7820 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7821 SUPPORTED_1000baseT_Full);
a2fbb9ea 7822
c18487ee
YR
7823 if (!(bp->link_params.speed_cap_mask &
7824 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7825 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7826
c18487ee
YR
7827 if (!(bp->link_params.speed_cap_mask &
7828 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7829 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7830
34f80b04 7831 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7832}
7833
34f80b04 7834static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7835{
c18487ee 7836 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7837
34f80b04 7838 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7839 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7840 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7841 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7842 bp->port.advertising = bp->port.supported;
a2fbb9ea 7843 } else {
c18487ee
YR
7844 u32 ext_phy_type =
7845 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7846
7847 if ((ext_phy_type ==
7848 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7849 (ext_phy_type ==
7850 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7851 /* force 10G, no AN */
c18487ee 7852 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7853 bp->port.advertising =
a2fbb9ea
ET
7854 (ADVERTISED_10000baseT_Full |
7855 ADVERTISED_FIBRE);
7856 break;
7857 }
7858 BNX2X_ERR("NVRAM config error. "
7859 "Invalid link_config 0x%x"
7860 " Autoneg not supported\n",
34f80b04 7861 bp->port.link_config);
a2fbb9ea
ET
7862 return;
7863 }
7864 break;
7865
7866 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7867 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7868 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7869 bp->port.advertising = (ADVERTISED_10baseT_Full |
7870 ADVERTISED_TP);
a2fbb9ea
ET
7871 } else {
7872 BNX2X_ERR("NVRAM config error. "
7873 "Invalid link_config 0x%x"
7874 " speed_cap_mask 0x%x\n",
34f80b04 7875 bp->port.link_config,
c18487ee 7876 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7877 return;
7878 }
7879 break;
7880
7881 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7882 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7883 bp->link_params.req_line_speed = SPEED_10;
7884 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7885 bp->port.advertising = (ADVERTISED_10baseT_Half |
7886 ADVERTISED_TP);
a2fbb9ea
ET
7887 } else {
7888 BNX2X_ERR("NVRAM config error. "
7889 "Invalid link_config 0x%x"
7890 " speed_cap_mask 0x%x\n",
34f80b04 7891 bp->port.link_config,
c18487ee 7892 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7893 return;
7894 }
7895 break;
7896
7897 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7898 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7899 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7900 bp->port.advertising = (ADVERTISED_100baseT_Full |
7901 ADVERTISED_TP);
a2fbb9ea
ET
7902 } else {
7903 BNX2X_ERR("NVRAM config error. "
7904 "Invalid link_config 0x%x"
7905 " speed_cap_mask 0x%x\n",
34f80b04 7906 bp->port.link_config,
c18487ee 7907 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7908 return;
7909 }
7910 break;
7911
7912 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7913 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7914 bp->link_params.req_line_speed = SPEED_100;
7915 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7916 bp->port.advertising = (ADVERTISED_100baseT_Half |
7917 ADVERTISED_TP);
a2fbb9ea
ET
7918 } else {
7919 BNX2X_ERR("NVRAM config error. "
7920 "Invalid link_config 0x%x"
7921 " speed_cap_mask 0x%x\n",
34f80b04 7922 bp->port.link_config,
c18487ee 7923 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7924 return;
7925 }
7926 break;
7927
7928 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7929 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7930 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7931 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7932 ADVERTISED_TP);
a2fbb9ea
ET
7933 } else {
7934 BNX2X_ERR("NVRAM config error. "
7935 "Invalid link_config 0x%x"
7936 " speed_cap_mask 0x%x\n",
34f80b04 7937 bp->port.link_config,
c18487ee 7938 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7939 return;
7940 }
7941 break;
7942
7943 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7944 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7945 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7946 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7947 ADVERTISED_TP);
a2fbb9ea
ET
7948 } else {
7949 BNX2X_ERR("NVRAM config error. "
7950 "Invalid link_config 0x%x"
7951 " speed_cap_mask 0x%x\n",
34f80b04 7952 bp->port.link_config,
c18487ee 7953 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7954 return;
7955 }
7956 break;
7957
7958 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7959 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7960 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7961 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7962 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7963 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7964 ADVERTISED_FIBRE);
a2fbb9ea
ET
7965 } else {
7966 BNX2X_ERR("NVRAM config error. "
7967 "Invalid link_config 0x%x"
7968 " speed_cap_mask 0x%x\n",
34f80b04 7969 bp->port.link_config,
c18487ee 7970 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7971 return;
7972 }
7973 break;
7974
7975 default:
7976 BNX2X_ERR("NVRAM config error. "
7977 "BAD link speed link_config 0x%x\n",
34f80b04 7978 bp->port.link_config);
c18487ee 7979 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7980 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7981 break;
7982 }
a2fbb9ea 7983
34f80b04
EG
7984 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7985 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7986 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7987 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7988 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7989
c18487ee 7990 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7991 " advertising 0x%x\n",
c18487ee
YR
7992 bp->link_params.req_line_speed,
7993 bp->link_params.req_duplex,
34f80b04 7994 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7995}
7996
34f80b04 7997static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7998{
34f80b04
EG
7999 int port = BP_PORT(bp);
8000 u32 val, val2;
589abe3a 8001 u32 config;
c2c8b03e 8002 u16 i;
a2fbb9ea 8003
c18487ee 8004 bp->link_params.bp = bp;
34f80b04 8005 bp->link_params.port = port;
c18487ee 8006
c18487ee 8007 bp->link_params.lane_config =
a2fbb9ea 8008 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8009 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8010 SHMEM_RD(bp,
8011 dev_info.port_hw_config[port].external_phy_config);
c18487ee 8012 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8013 SHMEM_RD(bp,
8014 dev_info.port_hw_config[port].speed_capability_mask);
8015
34f80b04 8016 bp->port.link_config =
a2fbb9ea
ET
8017 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8018
c2c8b03e
EG
8019 /* Get the 4 lanes xgxs config rx and tx */
8020 for (i = 0; i < 2; i++) {
8021 val = SHMEM_RD(bp,
8022 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8023 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8024 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8025
8026 val = SHMEM_RD(bp,
8027 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8028 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8029 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8030 }
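/* Each 32-bit shmem word read above carries two XGXS lane settings: the
 * high 16 bits go to the even lane (0 or 2) and the low 16 bits to the
 * following odd lane, so two iterations fill all four lanes. */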
8031
589abe3a
EG
8032 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8033 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8034 bp->link_params.feature_config_flags |=
8035 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8036 else
8037 bp->link_params.feature_config_flags &=
8038 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8039
3ce2c3f9
EG
8040 /* If the device is capable of WoL, set the default state according
8041 * to the HW
8042 */
8043 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8044 (config & PORT_FEATURE_WOL_ENABLED));
8045
c2c8b03e
EG
8046 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8047 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8048 bp->link_params.lane_config,
8049 bp->link_params.ext_phy_config,
34f80b04 8050 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8051
34f80b04 8052 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
8053 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8054 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8055
8056 bnx2x_link_settings_requested(bp);
8057
8058 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8059 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8060 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8061 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8062 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8063 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8064 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8065 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8066 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8067 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8068}
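/* Worked example for the MAC assembly above (hypothetical values):
 * mac_upper holds the two high bytes and mac_lower the remaining four, so
 * mac_upper 0x0010 with mac_lower 0x18a1b2c3 yields 00:10:18:a1:b2:c3. */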
8069
8070static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8071{
8072 int func = BP_FUNC(bp);
8073 u32 val, val2;
8074 int rc = 0;
a2fbb9ea 8075
34f80b04 8076 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8077
34f80b04
EG
8078 bp->e1hov = 0;
8079 bp->e1hmf = 0;
8080 if (CHIP_IS_E1H(bp)) {
8081 bp->mf_config =
8082 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8083
3196a88a
EG
8084 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8085 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8086 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8087
34f80b04
EG
8088 bp->e1hov = val;
8089 bp->e1hmf = 1;
8090 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8091 "(0x%04x)\n",
8092 func, bp->e1hov, bp->e1hov);
8093 } else {
f5372251 8094 BNX2X_DEV_INFO("single function mode\n");
34f80b04
EG
8095 if (BP_E1HVN(bp)) {
8096 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8097 " aborting\n", func);
8098 rc = -EPERM;
8099 }
8100 }
8101 }
a2fbb9ea 8102
34f80b04
EG
8103 if (!BP_NOMCP(bp)) {
8104 bnx2x_get_port_hwinfo(bp);
8105
8106 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8107 DRV_MSG_SEQ_NUMBER_MASK);
8108 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8109 }
8110
8111 if (IS_E1HMF(bp)) {
8112 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8113 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8114 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8115 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8116 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8117 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8118 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8119 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8120 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8121 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8122 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8123 ETH_ALEN);
8124 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8125 ETH_ALEN);
a2fbb9ea 8126 }
34f80b04
EG
8127
8128 return rc;
a2fbb9ea
ET
8129 }
8130
34f80b04
EG
8131 if (BP_NOMCP(bp)) {
8132 /* only supposed to happen on emulation/FPGA */
33471629 8133 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8134 random_ether_addr(bp->dev->dev_addr);
8135 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8136 }
a2fbb9ea 8137
34f80b04
EG
8138 return rc;
8139}
8140
8141static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8142{
8143 int func = BP_FUNC(bp);
87942b46 8144 int timer_interval;
34f80b04
EG
8145 int rc;
8146
da5a662a
VZ
8147 /* Disable interrupt handling until HW is initialized */
8148 atomic_set(&bp->intr_sem, 1);
8149
34f80b04 8150 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8151
1cf167f2 8152 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8153 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8154
8155 rc = bnx2x_get_hwinfo(bp);
8156
8157 /* need to reset chip if undi was active */
8158 if (!BP_NOMCP(bp))
8159 bnx2x_undi_unload(bp);
8160
8161 if (CHIP_REV_IS_FPGA(bp))
8162 printk(KERN_ERR PFX "FPGA detected\n");
8163
8164 if (BP_NOMCP(bp) && (func == 0))
8165 printk(KERN_ERR PFX
8166 "MCP disabled, must load devices in order!\n");
8167
555f6c78 8168 /* Set multi queue mode */
8badd27a
EG
8169 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8170 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8171 printk(KERN_ERR PFX
8badd27a 8172 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8173 multi_mode = ETH_RSS_MODE_DISABLED;
8174 }
8175 bp->multi_mode = multi_mode;
8176
8177
7a9b2557
VZ
8178 /* Set TPA flags */
8179 if (disable_tpa) {
8180 bp->flags &= ~TPA_ENABLE_FLAG;
8181 bp->dev->features &= ~NETIF_F_LRO;
8182 } else {
8183 bp->flags |= TPA_ENABLE_FLAG;
8184 bp->dev->features |= NETIF_F_LRO;
8185 }
8186
8d5726c4 8187 bp->mrrs = mrrs;
7a9b2557 8188
34f80b04
EG
8189 bp->tx_ring_size = MAX_TX_AVAIL;
8190 bp->rx_ring_size = MAX_RX_AVAIL;
8191
8192 bp->rx_csum = 1;
34f80b04
EG
8193
8194 bp->tx_ticks = 50;
8195 bp->rx_ticks = 25;
8196
87942b46
EG
8197 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8198 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8199
8200 init_timer(&bp->timer);
8201 bp->timer.expires = jiffies + bp->current_interval;
8202 bp->timer.data = (unsigned long) bp;
8203 bp->timer.function = bnx2x_timer;
8204
8205 return rc;
a2fbb9ea
ET
8206}
8207
8208/*
8209 * ethtool service functions
8210 */
8211
8212/* All ethtool functions called with rtnl_lock */
8213
8214static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8215{
8216 struct bnx2x *bp = netdev_priv(dev);
8217
34f80b04
EG
8218 cmd->supported = bp->port.supported;
8219 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8220
8221 if (netif_carrier_ok(dev)) {
c18487ee
YR
8222 cmd->speed = bp->link_vars.line_speed;
8223 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8224 } else {
c18487ee
YR
8225 cmd->speed = bp->link_params.req_line_speed;
8226 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8227 }
34f80b04
EG
8228 if (IS_E1HMF(bp)) {
8229 u16 vn_max_rate;
8230
8231 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8232 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8233 if (vn_max_rate < cmd->speed)
8234 cmd->speed = vn_max_rate;
8235 }
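/* Worked example (hypothetical config): the MAX_BW field is scaled by 100
 * above, so a field value of 25 gives vn_max_rate = 2500; on a 10G link
 * ethtool then reports min(10000, 2500) = 2500 Mbps for this
 * multi-function device. */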
a2fbb9ea 8236
c18487ee
YR
8237 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8238 u32 ext_phy_type =
8239 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8240
8241 switch (ext_phy_type) {
8242 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8244 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8245 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8246 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8247 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8248 cmd->port = PORT_FIBRE;
8249 break;
8250
8251 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8252 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8253 cmd->port = PORT_TP;
8254 break;
8255
c18487ee
YR
8256 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8257 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8258 bp->link_params.ext_phy_config);
8259 break;
8260
f1410647
ET
8261 default:
8262 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8263 bp->link_params.ext_phy_config);
8264 break;
f1410647
ET
8265 }
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
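
/*
 * Worked example (illustrative, not from the source): forcing 1G full
 * duplex arrives here with cmd->autoneg == AUTONEG_DISABLE,
 * cmd->speed == SPEED_1000 and cmd->duplex == DUPLEX_FULL, so the
 * switch above yields
 *
 *	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_TP);
 *	bp->link_params.req_line_speed = SPEED_1000;
 *	bp->link_params.req_duplex = DUPLEX_FULL;
 *
 * which is roughly what "ethtool -s ethX speed 1000 duplex full
 * autoneg off" would request from userspace (sketch; the exact ethtool
 * invocation is an assumption).
 */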

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}
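
/*
 * Decoding example for the snprintf() above (values assumed for
 * illustration): a bootcode version word of 0x040200 unpacks to
 * (4, 2, 0) and is reported as "BC:4.2.0"; if the PMF also read an
 * external PHY version string "1.2", the result is "BC:4.2.0 PHY:1.2".
 */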

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
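
/*
 * Sketch of the access protocol the four helpers above implement
 * (per-port software arbitration, then a global access enable):
 *
 *	rc = bnx2x_acquire_nvram_lock(bp);
 *	if (rc)
 *		return rc;
 *	bnx2x_enable_nvram_access(bp);
 *	... dword accesses via the MCP_REG_MCPR_NVM_* registers ...
 *	bnx2x_disable_nvram_access(bp);
 *	bnx2x_release_nvram_lock(bp);
 *
 * bnx2x_nvram_read()/bnx2x_nvram_write() below follow exactly this
 * pattern.
 */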

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
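
/*
 * Endianness example for the cpu_to_be32() above (illustrative): if
 * MCP_REG_MCPR_NVM_READ returns val == 0x11223344 on a little-endian
 * CPU, storing it raw would hand ethtool the bytes 44 33 22 11; after
 * cpu_to_be32() the buffer holds 11 22 33 44, i.e. the order in which
 * the bytes actually live in flash.
 */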

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
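
/*
 * FIRST/LAST sequencing example (illustrative): a 12-byte read at a
 * dword-aligned offset issues three dword reads whose cmd_flags are
 * MCPR_NVM_COMMAND_FIRST, 0 and MCPR_NVM_COMMAND_LAST respectively,
 * bracketing a single burst on the flash interface.
 */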

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
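
/*
 * BYTE_OFFSET() worked example (illustrative): for a single-byte write
 * at offset 0x7 the aligned dword sits at align_offset 0x4 and
 * BYTE_OFFSET(0x7) == 8 * (0x7 & 0x3) == 24, so ~(0xff << 24) masks
 * out exactly the byte being replaced before the dword is written
 * back - a classic read-modify-write.
 */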

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
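
/*
 * Page-boundary example (illustrative; NVRAM_PAGE_SIZE is assumed to
 * be the flash page size, e.g. 256 bytes): a write crossing a page
 * boundary is split into per-page bursts - the dword that ends a page
 * gets MCPR_NVM_COMMAND_LAST and the dword that starts the next page
 * gets MCPR_NVM_COMMAND_FIRST again, which is what the two modulo
 * checks in the loop above implement.
 */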

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
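
/*
 * Flag-composition example (illustrative): "rx on, tx off, autoneg
 * off" starts from BNX2X_FLOW_CTRL_AUTO, ORs in BNX2X_FLOW_CTRL_RX
 * and skips the TX bit; the result no longer equals
 * BNX2X_FLOW_CTRL_AUTO, so it is kept as RX-only pause.  Only when
 * neither direction is requested does the value collapse to
 * BNX2X_FLOW_CTRL_NONE.
 */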

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the read-back value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
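
/*
 * The table-driven test above repeats one core idiom; a minimal sketch
 * of it (hypothetical helper, not part of this driver):
 *
 *	static int reg_rw_test(struct bnx2x *bp, u32 offset, u32 mask,
 *			       u32 wr_val)
 *	{
 *		u32 save_val = REG_RD(bp, offset);
 *		u32 val;
 *
 *		REG_WR(bp, offset, wr_val);
 *		val = REG_RD(bp, offset);
 *		REG_WR(bp, offset, save_val);	always restore the register
 *
 *		return ((val & mask) == (wr_val & mask)) ? 0 : -ENODEV;
 *	}
 *
 * offset1 in reg_tbl[] is the per-port register stride, added once
 * when the test runs on port 1.
 */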

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
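
/*
 * Payload pattern note (illustrative): the sender fills byte i of the
 * frame (for i >= ETH_HLEN) with (i & 0xff), so the receive-side loop
 * can verify the looped-back frame byte-for-byte without keeping a
 * copy of the original buffer.
 */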

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
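
/*
 * Background note on CRC32_RESIDUAL: each nvram_tbl[] region is
 * expected to carry its own CRC32, and running the little-endian CRC
 * over an intact region including that stored CRC always yields the
 * fixed residue 0xdebb20e3 - a well-known CRC-32 property - so no
 * per-region expected checksum needs to be stored here.
 */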

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
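
/*
 * HILO_U64() example (illustrative): 8-byte counters are laid out as
 * two consecutive u32s with the high word first, so for
 * *offset == 0x00000001 and *(offset + 1) == 0x80000000 the value
 * exported to ethtool is 0x180000000.
 */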

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after BNX2X_HAS_WORK and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
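
/*
 * Split arithmetic example (illustrative): for a 1514-byte linear
 * buffer whose headers occupy hlen == 54 bytes, the header BD keeps
 * bytes [0, 54) of the existing DMA mapping and the data BD reuses
 * the same mapping at (address + 54) with nbytes == 1460, so no
 * second pci_map_single() is needed.
 */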

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
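
/*
 * Sign convention note (restating the code; the rationale is an
 * assumption): a positive fix means 'fix' bytes before the transport
 * header were folded into csum and are subtracted back out; a negative
 * fix means bytes starting at the transport header are missing and are
 * added.  The final swab16() presumably matches the byte order the
 * parsing BD expects.
 */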
10142
10143static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10144{
10145 u32 rc;
10146
10147 if (skb->ip_summed != CHECKSUM_PARTIAL)
10148 rc = XMIT_PLAIN;
10149
10150 else {
4781bfad 10151 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10152 rc = XMIT_CSUM_V6;
10153 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10154 rc |= XMIT_CSUM_TCP;
10155
10156 } else {
10157 rc = XMIT_CSUM_V4;
10158 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10159 rc |= XMIT_CSUM_TCP;
10160 }
10161 }
10162
10163 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10164 rc |= XMIT_GSO_V4;
10165
10166 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10167 rc |= XMIT_GSO_V6;
10168
10169 return rc;
10170}
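
Note that the return value is a bit-mask, not an enum: the checksum kind and
the GSO kind combine independently. A compilable sketch with stand-in flag
values (the real XMIT_* constants are defined in bnx2x.h):

#include <stdio.h>

#define XS_PLAIN	0x00
#define XS_CSUM_V4	0x01
#define XS_CSUM_V6	0x02
#define XS_CSUM_TCP	0x04
#define XS_GSO_V4	0x08
#define XS_GSO_V6	0x10

int main(void)
{
	/* e.g. an IPv4 TCP skb that needs both checksum offload and LSO */
	unsigned int type = XS_CSUM_V4 | XS_CSUM_TCP | XS_GSO_V4;

	if (type & (XS_GSO_V4 | XS_GSO_V6))
		printf("parsing BD + LSO fields needed\n");
	else if (type != XS_PLAIN)
		printf("parsing BD for checksum offload needed\n");
	else
		printf("plain transmit\n");
	return 0;
}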
10171
632da4d6 10172#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10173/* check if the packet requires linearization (it is too fragmented);
10174 no need to check fragmentation if page size > 8K (there will be no
10175 violation of FW restrictions) */
755735eb
EG
10176static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10177 u32 xmit_type)
10178{
10179 int to_copy = 0;
10180 int hlen = 0;
10181 int first_bd_sz = 0;
10182
10183 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10184 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10185
10186 if (xmit_type & XMIT_GSO) {
10187 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10188 /* Check if LSO packet needs to be copied:
10189 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10190 int wnd_size = MAX_FETCH_BD - 3;
33471629 10191 /* Number of windows to check */
755735eb
EG
10192 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10193 int wnd_idx = 0;
10194 int frag_idx = 0;
10195 u32 wnd_sum = 0;
10196
10197 /* Headers length */
10198 hlen = (int)(skb_transport_header(skb) - skb->data) +
10199 tcp_hdrlen(skb);
10200
10201 /* Amount of data (w/o headers) on linear part of SKB*/
10202 first_bd_sz = skb_headlen(skb) - hlen;
10203
10204 wnd_sum = first_bd_sz;
10205
10206 /* Calculate the first sum - it's special */
10207 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10208 wnd_sum +=
10209 skb_shinfo(skb)->frags[frag_idx].size;
10210
10211 /* If there was data on linear skb data - check it */
10212 if (first_bd_sz > 0) {
10213 if (unlikely(wnd_sum < lso_mss)) {
10214 to_copy = 1;
10215 goto exit_lbl;
10216 }
10217
10218 wnd_sum -= first_bd_sz;
10219 }
10220
10221 /* Others are easier: run through the frag list and
10222 check all windows */
10223 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10224 wnd_sum +=
10225 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10226
10227 if (unlikely(wnd_sum < lso_mss)) {
10228 to_copy = 1;
10229 break;
10230 }
10231 wnd_sum -=
10232 skb_shinfo(skb)->frags[wnd_idx].size;
10233 }
755735eb
EG
10234 } else {
10235 /* a non-LSO packet this fragmented must
10236 always be linearized */
10237 to_copy = 1;
10238 }
10239 }
10240
10241exit_lbl:
10242 if (unlikely(to_copy))
10243 DP(NETIF_MSG_TX_QUEUED,
10244 "Linearization IS REQUIRED for %s packet. "
10245 "num_frags %d hlen %d first_bd_sz %d\n",
10246 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10247 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10248
10249 return to_copy;
10250}
632da4d6 10251#endif
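
A simplified, self-contained model of the window scan (needs_linearize() and
its parameters are illustrative; the driver version above additionally
special-cases the linear part of the skb and derives the window size from
MAX_FETCH_BD - 3): every run of wnd_size consecutive BDs must carry at least
one MSS of payload, otherwise the packet has to be linearized.

#include <stdio.h>

static int needs_linearize(const int *bd_len, int nbds,
			   int wnd_size, int mss)
{
	int sum = 0, i;

	if (nbds < wnd_size)
		return 0;			/* short chains always fit */

	for (i = 0; i < wnd_size; i++)		/* first window */
		sum += bd_len[i];
	if (sum < mss)
		return 1;

	for (i = wnd_size; i < nbds; i++) {	/* slide one BD at a time */
		sum += bd_len[i] - bd_len[i - wnd_size];
		if (sum < mss)
			return 1;
	}
	return 0;
}

int main(void)
{
	int frags[] = { 200, 100, 100, 100, 100, 4000 };

	/* 200+100+100 < 1460 -> prints 1 (must linearize) */
	printf("%d\n", needs_linearize(frags, 6, 3, 1460));
	return 0;
}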
755735eb
EG
10252
10253/* called with netif_tx_lock
a2fbb9ea 10254 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10255 * netif_wake_queue()
a2fbb9ea
ET
10256 */
10257static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10258{
10259 struct bnx2x *bp = netdev_priv(dev);
10260 struct bnx2x_fastpath *fp;
555f6c78 10261 struct netdev_queue *txq;
a2fbb9ea
ET
10262 struct sw_tx_bd *tx_buf;
10263 struct eth_tx_bd *tx_bd;
10264 struct eth_tx_parse_bd *pbd = NULL;
10265 u16 pkt_prod, bd_prod;
755735eb 10266 int nbd, fp_index;
a2fbb9ea 10267 dma_addr_t mapping;
755735eb
EG
10268 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10269 int vlan_off = (bp->e1hov ? 4 : 0);
10270 int i;
10271 u8 hlen = 0;
a2fbb9ea
ET
10272
10273#ifdef BNX2X_STOP_ON_ERROR
10274 if (unlikely(bp->panic))
10275 return NETDEV_TX_BUSY;
10276#endif
10277
555f6c78
EG
10278 fp_index = skb_get_queue_mapping(skb);
10279 txq = netdev_get_tx_queue(dev, fp_index);
10280
a2fbb9ea 10281 fp = &bp->fp[fp_index];
755735eb 10282
231fd58a 10283 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10284 fp->eth_q_stats.driver_xoff++;
555f6c78 10285 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10286 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10287 return NETDEV_TX_BUSY;
10288 }
10289
755735eb
EG
10290 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10291 " gso type %x xmit_type %x\n",
10292 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10293 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10294
632da4d6 10295#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10296 /* First, check if we need to linearize the skb (due to FW
10297 restrictions). No need to check fragmentation if page size > 8K
10298 (there will be no violation of FW restrictions) */
755735eb
EG
10299 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10300 /* Statistics of linearization */
10301 bp->lin_cnt++;
10302 if (skb_linearize(skb) != 0) {
10303 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10304 "silently dropping this SKB\n");
10305 dev_kfree_skb_any(skb);
da5a662a 10306 return NETDEV_TX_OK;
755735eb
EG
10307 }
10308 }
632da4d6 10309#endif
755735eb 10310
a2fbb9ea 10311 /*
755735eb 10312 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10313 then for TSO or xsum we have a parsing info BD,
755735eb 10314 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10315 (don't forget to mark the last one as last,
10316 and to unmap only AFTER you write to the BD ...)
755735eb 10317 And above all, all pbd sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10318 */
10319
10320 pkt_prod = fp->tx_pkt_prod++;
755735eb 10321 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10322
755735eb 10323 /* get a tx_buf and first BD */
a2fbb9ea
ET
10324 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10325 tx_bd = &fp->tx_desc_ring[bd_prod];
10326
10327 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10328 tx_bd->general_data = (UNICAST_ADDRESS <<
10329 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10330 /* header nbd */
10331 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10332
755735eb
EG
10333 /* remember the first BD of the packet */
10334 tx_buf->first_bd = fp->tx_bd_prod;
10335 tx_buf->skb = skb;
a2fbb9ea
ET
10336
10337 DP(NETIF_MSG_TX_QUEUED,
10338 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10339 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10340
0c6671b0
EG
10341#ifdef BCM_VLAN
10342 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10343 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10344 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10345 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10346 vlan_off += 4;
10347 } else
0c6671b0 10348#endif
755735eb 10349 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10350
755735eb 10351 if (xmit_type) {
755735eb 10352 /* turn on parsing and get a BD */
a2fbb9ea
ET
10353 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10354 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10355
10356 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10357 }
10358
10359 if (xmit_type & XMIT_CSUM) {
10360 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10361
10362 /* for now NS flag is not used in Linux */
4781bfad
EG
10363 pbd->global_data =
10364 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10365 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10366
755735eb
EG
10367 pbd->ip_hlen = (skb_transport_header(skb) -
10368 skb_network_header(skb)) / 2;
10369
10370 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10371
755735eb
EG
10372 pbd->total_hlen = cpu_to_le16(hlen);
10373 hlen = hlen*2 - vlan_off;
a2fbb9ea 10374
755735eb
EG
10375 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10376
10377 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10378 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10379 ETH_TX_BD_FLAGS_IP_CSUM;
10380 else
10381 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10382
10383 if (xmit_type & XMIT_CSUM_TCP) {
10384 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10385
10386 } else {
10387 s8 fix = SKB_CS_OFF(skb); /* signed! */
10388
a2fbb9ea 10389 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10390 pbd->cs_offset = fix / 2;
a2fbb9ea 10391
755735eb
EG
10392 DP(NETIF_MSG_TX_QUEUED,
10393 "hlen %d offset %d fix %d csum before fix %x\n",
10394 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10395 SKB_CS(skb));
10396
10397 /* HW bug: fixup the CSUM */
10398 pbd->tcp_pseudo_csum =
10399 bnx2x_csum_fix(skb_transport_header(skb),
10400 SKB_CS(skb), fix);
10401
10402 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10403 pbd->tcp_pseudo_csum);
10404 }
a2fbb9ea
ET
10405 }
10406
10407 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10408 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10409
10410 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10411 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10412 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10413 tx_bd->nbd = cpu_to_le16(nbd);
10414 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10415
10416 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10417 " nbytes %d flags %x vlan %x\n",
10418 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10419 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10420 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10421
755735eb 10422 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10423
10424 DP(NETIF_MSG_TX_QUEUED,
10425 "TSO packet len %d hlen %d total len %d tso size %d\n",
10426 skb->len, hlen, skb_headlen(skb),
10427 skb_shinfo(skb)->gso_size);
10428
10429 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10430
755735eb
EG
10431 if (unlikely(skb_headlen(skb) > hlen))
10432 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10433 bd_prod, ++nbd);
a2fbb9ea
ET
10434
10435 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10436 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10437 pbd->tcp_flags = pbd_tcp_flags(skb);
10438
10439 if (xmit_type & XMIT_GSO_V4) {
10440 pbd->ip_id = swab16(ip_hdr(skb)->id);
10441 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10442 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10443 ip_hdr(skb)->daddr,
10444 0, IPPROTO_TCP, 0));
755735eb
EG
10445
10446 } else
10447 pbd->tcp_pseudo_csum =
10448 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10449 &ipv6_hdr(skb)->daddr,
10450 0, IPPROTO_TCP, 0));
10451
a2fbb9ea
ET
10452 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10453 }
10454
755735eb
EG
10455 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10456 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10457
755735eb
EG
10458 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10459 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10460
755735eb
EG
10461 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10462 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10463
755735eb
EG
10464 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10465 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10466 tx_bd->nbytes = cpu_to_le16(frag->size);
10467 tx_bd->vlan = cpu_to_le16(pkt_prod);
10468 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10469
755735eb
EG
10470 DP(NETIF_MSG_TX_QUEUED,
10471 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10472 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10473 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10474 }
10475
755735eb 10476 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10477 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10478
10479 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10480 tx_bd, tx_bd->bd_flags.as_bitfield);
10481
a2fbb9ea
ET
10482 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10483
755735eb 10484 /* now send a tx doorbell, counting the next (page) BD
a2fbb9ea
ET
10485 * if the packet contains or ends with it
10486 */
10487 if (TX_BD_POFF(bd_prod) < nbd)
10488 nbd++;
10489
10490 if (pbd)
10491 DP(NETIF_MSG_TX_QUEUED,
10492 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10493 " tcp_flags %x xsum %x seq %u hlen %u\n",
10494 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10495 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10496 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10497
755735eb 10498 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10499
58f4c4cf
EG
10500 /*
10501 * Make sure that the BD data is updated before updating the producer
10502 * since FW might read the BD right after the producer is updated.
10503 * This is only applicable for weak-ordered memory model archs such
10504 * as IA-64. The following barrier is also mandatory since FW
10505 * assumes packets must have BDs.
10506 */
10507 wmb();
10508
4781bfad 10509 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
a2fbb9ea 10510 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 10511 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 10512 DOORBELL(bp, fp->index, 0);
a2fbb9ea
ET
10513
10514 mmiowb();
10515
755735eb 10516 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10517 dev->trans_start = jiffies;
10518
10519 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10520 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10521 if we put Tx into XOFF state. */
10522 smp_mb();
555f6c78 10523 netif_tx_stop_queue(txq);
de832a55 10524 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10525 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10526 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10527 }
10528 fp->tx_pkt++;
10529
10530 return NETDEV_TX_OK;
10531}
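
A worked example of the nbd accounting in the function above (values picked
for illustration): a TSO skb with two frags whose first BD gets split.

/*
 *   start BD + parsing BD + 2 frag BDs:  nbd = nr_frags + 2          = 4
 *   bnx2x_tx_split() adds a data BD:     ++nbd                       = 5
 *   producer wraps a BD page:            TX_BD_POFF(bd_prod) < nbd, nbd++
 *
 * The parsing BD carries no payload but is counted in nbd; the next-page
 * BD carries no payload either and is only counted when the packet
 * contains or ends with it.
 */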
10532
bb2a0f7a 10533/* called with rtnl_lock */
a2fbb9ea
ET
10534static int bnx2x_open(struct net_device *dev)
10535{
10536 struct bnx2x *bp = netdev_priv(dev);
10537
6eccabb3
EG
10538 netif_carrier_off(dev);
10539
a2fbb9ea
ET
10540 bnx2x_set_power_state(bp, PCI_D0);
10541
bb2a0f7a 10542 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10543}
10544
bb2a0f7a 10545/* called with rtnl_lock */
a2fbb9ea
ET
10546static int bnx2x_close(struct net_device *dev)
10547{
a2fbb9ea
ET
10548 struct bnx2x *bp = netdev_priv(dev);
10549
10550 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10551 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10552 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10553 if (!CHIP_REV_IS_SLOW(bp))
10554 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10555
10556 return 0;
10557}
10558
f5372251 10559/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
10560static void bnx2x_set_rx_mode(struct net_device *dev)
10561{
10562 struct bnx2x *bp = netdev_priv(dev);
10563 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10564 int port = BP_PORT(bp);
10565
10566 if (bp->state != BNX2X_STATE_OPEN) {
10567 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10568 return;
10569 }
10570
10571 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10572
10573 if (dev->flags & IFF_PROMISC)
10574 rx_mode = BNX2X_RX_MODE_PROMISC;
10575
10576 else if ((dev->flags & IFF_ALLMULTI) ||
10577 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10578 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10579
10580 else { /* some multicasts */
10581 if (CHIP_IS_E1(bp)) {
10582 int i, old, offset;
10583 struct dev_mc_list *mclist;
10584 struct mac_configuration_cmd *config =
10585 bnx2x_sp(bp, mcast_config);
10586
10587 for (i = 0, mclist = dev->mc_list;
10588 mclist && (i < dev->mc_count);
10589 i++, mclist = mclist->next) {
10590
10591 config->config_table[i].
10592 cam_entry.msb_mac_addr =
10593 swab16(*(u16 *)&mclist->dmi_addr[0]);
10594 config->config_table[i].
10595 cam_entry.middle_mac_addr =
10596 swab16(*(u16 *)&mclist->dmi_addr[2]);
10597 config->config_table[i].
10598 cam_entry.lsb_mac_addr =
10599 swab16(*(u16 *)&mclist->dmi_addr[4]);
10600 config->config_table[i].cam_entry.flags =
10601 cpu_to_le16(port);
10602 config->config_table[i].
10603 target_table_entry.flags = 0;
10604 config->config_table[i].
10605 target_table_entry.client_id = 0;
10606 config->config_table[i].
10607 target_table_entry.vlan_id = 0;
10608
10609 DP(NETIF_MSG_IFUP,
10610 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10611 config->config_table[i].
10612 cam_entry.msb_mac_addr,
10613 config->config_table[i].
10614 cam_entry.middle_mac_addr,
10615 config->config_table[i].
10616 cam_entry.lsb_mac_addr);
10617 }
8d9c5f34 10618 old = config->hdr.length;
34f80b04
EG
10619 if (old > i) {
10620 for (; i < old; i++) {
10621 if (CAM_IS_INVALID(config->
10622 config_table[i])) {
af246401 10623 /* already invalidated */
34f80b04
EG
10624 break;
10625 }
10626 /* invalidate */
10627 CAM_INVALIDATE(config->
10628 config_table[i]);
10629 }
10630 }
10631
10632 if (CHIP_REV_IS_SLOW(bp))
10633 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10634 else
10635 offset = BNX2X_MAX_MULTICAST*(1 + port);
10636
8d9c5f34 10637 config->hdr.length = i;
34f80b04 10638 config->hdr.offset = offset;
8d9c5f34 10639 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10640 config->hdr.reserved1 = 0;
10641
10642 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10643 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10644 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10645 0);
10646 } else { /* E1H */
10647 /* Accept one or more multicasts */
10648 struct dev_mc_list *mclist;
10649 u32 mc_filter[MC_HASH_SIZE];
10650 u32 crc, bit, regidx;
10651 int i;
10652
10653 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10654
10655 for (i = 0, mclist = dev->mc_list;
10656 mclist && (i < dev->mc_count);
10657 i++, mclist = mclist->next) {
10658
7c510e4b
JB
10659 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10660 mclist->dmi_addr);
34f80b04
EG
10661
10662 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10663 bit = (crc >> 24) & 0xff;
10664 regidx = bit >> 5;
10665 bit &= 0x1f;
10666 mc_filter[regidx] |= (1 << bit);
10667 }
10668
10669 for (i = 0; i < MC_HASH_SIZE; i++)
10670 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10671 mc_filter[i]);
10672 }
10673 }
10674
10675 bp->rx_mode = rx_mode;
10676 bnx2x_set_storm_rx_mode(bp);
10677}
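
The E1H branch places each multicast MAC into a 256-bit hash filter: the top
byte of the CRC selects one of 8 * 32 bits, regidx picks the 32-bit register
and bit the position inside it. A self-contained sketch of just that placement
(0xdeadbeef stands in for crc32c_le(0, mclist->dmi_addr, ETH_ALEN)):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crc = 0xdeadbeef;		/* stand-in CRC32c value */
	unsigned int bit = (crc >> 24) & 0xff;	/* 0..255 */
	unsigned int regidx = bit >> 5;		/* which of the 8 registers */

	bit &= 0x1f;				/* which bit inside it */
	printf("reg %u bit %u\n", regidx, bit);	/* prints "reg 6 bit 30" */
	return 0;
}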
10678
10679/* called with rtnl_lock */
a2fbb9ea
ET
10680static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10681{
10682 struct sockaddr *addr = p;
10683 struct bnx2x *bp = netdev_priv(dev);
10684
34f80b04 10685 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10686 return -EINVAL;
10687
10688 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10689 if (netif_running(dev)) {
10690 if (CHIP_IS_E1(bp))
3101c2bc 10691 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10692 else
3101c2bc 10693 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10694 }
a2fbb9ea
ET
10695
10696 return 0;
10697}
10698
c18487ee 10699/* called with rtnl_lock */
a2fbb9ea
ET
10700static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10701{
10702 struct mii_ioctl_data *data = if_mii(ifr);
10703 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10704 int port = BP_PORT(bp);
a2fbb9ea
ET
10705 int err;
10706
10707 switch (cmd) {
10708 case SIOCGMIIPHY:
34f80b04 10709 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10710
c14423fe 10711 /* fallthrough */
c18487ee 10712
a2fbb9ea 10713 case SIOCGMIIREG: {
c18487ee 10714 u16 mii_regval;
a2fbb9ea 10715
c18487ee
YR
10716 if (!netif_running(dev))
10717 return -EAGAIN;
a2fbb9ea 10718
34f80b04 10719 mutex_lock(&bp->port.phy_mutex);
3196a88a 10720 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10721 DEFAULT_PHY_DEV_ADDR,
10722 (data->reg_num & 0x1f), &mii_regval);
10723 data->val_out = mii_regval;
34f80b04 10724 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10725 return err;
10726 }
10727
10728 case SIOCSMIIREG:
10729 if (!capable(CAP_NET_ADMIN))
10730 return -EPERM;
10731
c18487ee
YR
10732 if (!netif_running(dev))
10733 return -EAGAIN;
10734
34f80b04 10735 mutex_lock(&bp->port.phy_mutex);
3196a88a 10736 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10737 DEFAULT_PHY_DEV_ADDR,
10738 (data->reg_num & 0x1f), data->val_in);
34f80b04 10739 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10740 return err;
10741
10742 default:
10743 /* do nothing */
10744 break;
10745 }
10746
10747 return -EOPNOTSUPP;
10748}
10749
34f80b04 10750/* called with rtnl_lock */
a2fbb9ea
ET
10751static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10752{
10753 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10754 int rc = 0;
a2fbb9ea
ET
10755
10756 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10757 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10758 return -EINVAL;
10759
10760 /* This does not race with packet allocation
c14423fe 10761 * because the actual alloc size is
a2fbb9ea
ET
10762 * only updated as part of load
10763 */
10764 dev->mtu = new_mtu;
10765
10766 if (netif_running(dev)) {
34f80b04
EG
10767 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10768 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10769 }
34f80b04
EG
10770
10771 return rc;
a2fbb9ea
ET
10772}
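
The accepted range in plain numbers, assuming the usual driver values
ETH_MAX_JUMBO_PACKET_SIZE = 9600, ETH_MIN_PACKET_SIZE = 60 and ETH_HLEN = 14
(assumptions here; the authoritative values live in the driver headers): an
MTU passes iff 46 <= new_mtu <= 9600.

#include <stdio.h>

static int mtu_ok(int new_mtu)
{
	const int eth_hlen = 14;	/* assumed ETH_HLEN */
	const int min_pkt = 60;		/* assumed ETH_MIN_PACKET_SIZE */
	const int max_jumbo = 9600;	/* assumed ETH_MAX_JUMBO_PACKET_SIZE */

	return new_mtu <= max_jumbo && new_mtu + eth_hlen >= min_pkt;
}

int main(void)
{
	/* prints "0 1 0": 45 too small, 1500 fine, 9601 too large */
	printf("%d %d %d\n", mtu_ok(45), mtu_ok(1500), mtu_ok(9601));
	return 0;
}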
10773
10774static void bnx2x_tx_timeout(struct net_device *dev)
10775{
10776 struct bnx2x *bp = netdev_priv(dev);
10777
10778#ifdef BNX2X_STOP_ON_ERROR
10779 if (!bp->panic)
10780 bnx2x_panic();
10781#endif
10782 /* This allows the netif to be shutdown gracefully before resetting */
10783 schedule_work(&bp->reset_task);
10784}
10785
10786#ifdef BCM_VLAN
34f80b04 10787/* called with rtnl_lock */
a2fbb9ea
ET
10788static void bnx2x_vlan_rx_register(struct net_device *dev,
10789 struct vlan_group *vlgrp)
10790{
10791 struct bnx2x *bp = netdev_priv(dev);
10792
10793 bp->vlgrp = vlgrp;
0c6671b0
EG
10794
10795 /* Set flags according to the required capabilities */
10796 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10797
10798 if (dev->features & NETIF_F_HW_VLAN_TX)
10799 bp->flags |= HW_VLAN_TX_FLAG;
10800
10801 if (dev->features & NETIF_F_HW_VLAN_RX)
10802 bp->flags |= HW_VLAN_RX_FLAG;
10803
a2fbb9ea 10804 if (netif_running(dev))
49d66772 10805 bnx2x_set_client_config(bp);
a2fbb9ea 10806}
34f80b04 10807
a2fbb9ea
ET
10808#endif
10809
10810#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10811static void poll_bnx2x(struct net_device *dev)
10812{
10813 struct bnx2x *bp = netdev_priv(dev);
10814
10815 disable_irq(bp->pdev->irq);
10816 bnx2x_interrupt(bp->pdev->irq, dev);
10817 enable_irq(bp->pdev->irq);
10818}
10819#endif
10820
c64213cd
SH
10821static const struct net_device_ops bnx2x_netdev_ops = {
10822 .ndo_open = bnx2x_open,
10823 .ndo_stop = bnx2x_close,
10824 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 10825 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
10826 .ndo_set_mac_address = bnx2x_change_mac_addr,
10827 .ndo_validate_addr = eth_validate_addr,
10828 .ndo_do_ioctl = bnx2x_ioctl,
10829 .ndo_change_mtu = bnx2x_change_mtu,
10830 .ndo_tx_timeout = bnx2x_tx_timeout,
10831#ifdef BCM_VLAN
10832 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10833#endif
10834#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10835 .ndo_poll_controller = poll_bnx2x,
10836#endif
10837};
10838
34f80b04
EG
10839static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10840 struct net_device *dev)
a2fbb9ea
ET
10841{
10842 struct bnx2x *bp;
10843 int rc;
10844
10845 SET_NETDEV_DEV(dev, &pdev->dev);
10846 bp = netdev_priv(dev);
10847
34f80b04
EG
10848 bp->dev = dev;
10849 bp->pdev = pdev;
a2fbb9ea 10850 bp->flags = 0;
34f80b04 10851 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10852
10853 rc = pci_enable_device(pdev);
10854 if (rc) {
10855 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10856 goto err_out;
10857 }
10858
10859 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10860 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10861 " aborting\n");
10862 rc = -ENODEV;
10863 goto err_out_disable;
10864 }
10865
10866 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10867 printk(KERN_ERR PFX "Cannot find second PCI device"
10868 " base address, aborting\n");
10869 rc = -ENODEV;
10870 goto err_out_disable;
10871 }
10872
34f80b04
EG
10873 if (atomic_read(&pdev->enable_cnt) == 1) {
10874 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10875 if (rc) {
10876 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10877 " aborting\n");
10878 goto err_out_disable;
10879 }
a2fbb9ea 10880
34f80b04
EG
10881 pci_set_master(pdev);
10882 pci_save_state(pdev);
10883 }
a2fbb9ea
ET
10884
10885 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10886 if (bp->pm_cap == 0) {
10887 printk(KERN_ERR PFX "Cannot find power management"
10888 " capability, aborting\n");
10889 rc = -EIO;
10890 goto err_out_release;
10891 }
10892
10893 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10894 if (bp->pcie_cap == 0) {
10895 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10896 " aborting\n");
10897 rc = -EIO;
10898 goto err_out_release;
10899 }
10900
10901 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10902 bp->flags |= USING_DAC_FLAG;
10903 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10904 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10905 " failed, aborting\n");
10906 rc = -EIO;
10907 goto err_out_release;
10908 }
10909
10910 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10911 printk(KERN_ERR PFX "System does not support DMA,"
10912 " aborting\n");
10913 rc = -EIO;
10914 goto err_out_release;
10915 }
10916
34f80b04
EG
10917 dev->mem_start = pci_resource_start(pdev, 0);
10918 dev->base_addr = dev->mem_start;
10919 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10920
10921 dev->irq = pdev->irq;
10922
275f165f 10923 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10924 if (!bp->regview) {
10925 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10926 rc = -ENOMEM;
10927 goto err_out_release;
10928 }
10929
34f80b04
EG
10930 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10931 min_t(u64, BNX2X_DB_SIZE,
10932 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10933 if (!bp->doorbells) {
10934 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10935 rc = -ENOMEM;
10936 goto err_out_unmap;
10937 }
10938
10939 bnx2x_set_power_state(bp, PCI_D0);
10940
34f80b04
EG
10941 /* clean indirect addresses */
10942 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10943 PCICFG_VENDOR_ID_OFFSET);
10944 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10945 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10946 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10947 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10948
34f80b04 10949 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10950
c64213cd 10951 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10952 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10953 dev->features |= NETIF_F_SG;
10954 dev->features |= NETIF_F_HW_CSUM;
10955 if (bp->flags & USING_DAC_FLAG)
10956 dev->features |= NETIF_F_HIGHDMA;
10957#ifdef BCM_VLAN
10958 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10959 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10960#endif
10961 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10962 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10963
10964 return 0;
10965
10966err_out_unmap:
10967 if (bp->regview) {
10968 iounmap(bp->regview);
10969 bp->regview = NULL;
10970 }
a2fbb9ea
ET
10971 if (bp->doorbells) {
10972 iounmap(bp->doorbells);
10973 bp->doorbells = NULL;
10974 }
10975
10976err_out_release:
34f80b04
EG
10977 if (atomic_read(&pdev->enable_cnt) == 1)
10978 pci_release_regions(pdev);
a2fbb9ea
ET
10979
10980err_out_disable:
10981 pci_disable_device(pdev);
10982 pci_set_drvdata(pdev, NULL);
10983
10984err_out:
10985 return rc;
10986}
10987
25047950
ET
10988static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10989{
10990 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10991
10992 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10993 return val;
10994}
10995
10996/* return value of 1=2.5GHz 2=5GHz */
10997static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10998{
10999 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11000
11001 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11002 return val;
11003}
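
Both helpers are plain mask-and-shift extractions from the same link-control
register. A generic sketch (the mask and shift values below are made up for
illustration; the real PCICFG_LINK_* definitions live in the driver headers):

#include <stdint.h>
#include <stdio.h>

static uint32_t field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t link = 0x00410000;	/* made-up register value */

	/* hypothetical 6-bit width field at bit 20, 4-bit speed at bit 16;
	 * prints "width x4 speed 1" (1 = 2.5GHz per the comment above) */
	printf("width x%u speed %u\n",
	       field(link, 0x03f00000, 20), field(link, 0x000f0000, 16));
	return 0;
}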
11004
a2fbb9ea
ET
11005static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11006 const struct pci_device_id *ent)
11007{
11008 static int version_printed;
11009 struct net_device *dev = NULL;
11010 struct bnx2x *bp;
25047950 11011 int rc;
a2fbb9ea
ET
11012
11013 if (version_printed++ == 0)
11014 printk(KERN_INFO "%s", version);
11015
11016 /* dev zeroed in init_etherdev */
555f6c78 11017 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11018 if (!dev) {
11019 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11020 return -ENOMEM;
34f80b04 11021 }
a2fbb9ea 11022
a2fbb9ea
ET
11023 bp = netdev_priv(dev);
11024 bp->msglevel = debug;
11025
34f80b04 11026 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
11027 if (rc < 0) {
11028 free_netdev(dev);
11029 return rc;
11030 }
11031
a2fbb9ea
ET
11032 pci_set_drvdata(pdev, dev);
11033
34f80b04 11034 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11035 if (rc)
11036 goto init_one_exit;
11037
11038 rc = register_netdev(dev);
34f80b04 11039 if (rc) {
693fc0d1 11040 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11041 goto init_one_exit;
11042 }
11043
25047950 11044 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11045 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11046 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
11047 bnx2x_get_pcie_width(bp),
11048 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11049 dev->base_addr, bp->pdev->irq);
e174961c 11050 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11051
a2fbb9ea 11052 return 0;
34f80b04
EG
11053
11054init_one_exit:
11055 if (bp->regview)
11056 iounmap(bp->regview);
11057
11058 if (bp->doorbells)
11059 iounmap(bp->doorbells);
11060
11061 free_netdev(dev);
11062
11063 if (atomic_read(&pdev->enable_cnt) == 1)
11064 pci_release_regions(pdev);
11065
11066 pci_disable_device(pdev);
11067 pci_set_drvdata(pdev, NULL);
11068
11069 return rc;
a2fbb9ea
ET
11070}
11071
11072static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11073{
11074 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11075 struct bnx2x *bp;
11076
11077 if (!dev) {
228241eb
ET
11078 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11079 return;
11080 }
228241eb 11081 bp = netdev_priv(dev);
a2fbb9ea 11082
a2fbb9ea
ET
11083 unregister_netdev(dev);
11084
11085 if (bp->regview)
11086 iounmap(bp->regview);
11087
11088 if (bp->doorbells)
11089 iounmap(bp->doorbells);
11090
11091 free_netdev(dev);
34f80b04
EG
11092
11093 if (atomic_read(&pdev->enable_cnt) == 1)
11094 pci_release_regions(pdev);
11095
a2fbb9ea
ET
11096 pci_disable_device(pdev);
11097 pci_set_drvdata(pdev, NULL);
11098}
11099
11100static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11101{
11102 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11103 struct bnx2x *bp;
11104
34f80b04
EG
11105 if (!dev) {
11106 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11107 return -ENODEV;
11108 }
11109 bp = netdev_priv(dev);
a2fbb9ea 11110
34f80b04 11111 rtnl_lock();
a2fbb9ea 11112
34f80b04 11113 pci_save_state(pdev);
228241eb 11114
34f80b04
EG
11115 if (!netif_running(dev)) {
11116 rtnl_unlock();
11117 return 0;
11118 }
a2fbb9ea
ET
11119
11120 netif_device_detach(dev);
a2fbb9ea 11121
da5a662a 11122 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11123
a2fbb9ea 11124 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11125
34f80b04
EG
11126 rtnl_unlock();
11127
a2fbb9ea
ET
11128 return 0;
11129}
11130
11131static int bnx2x_resume(struct pci_dev *pdev)
11132{
11133 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11134 struct bnx2x *bp;
a2fbb9ea
ET
11135 int rc;
11136
228241eb
ET
11137 if (!dev) {
11138 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11139 return -ENODEV;
11140 }
228241eb 11141 bp = netdev_priv(dev);
a2fbb9ea 11142
34f80b04
EG
11143 rtnl_lock();
11144
228241eb 11145 pci_restore_state(pdev);
34f80b04
EG
11146
11147 if (!netif_running(dev)) {
11148 rtnl_unlock();
11149 return 0;
11150 }
11151
a2fbb9ea
ET
11152 bnx2x_set_power_state(bp, PCI_D0);
11153 netif_device_attach(dev);
11154
da5a662a 11155 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11156
34f80b04
EG
11157 rtnl_unlock();
11158
11159 return rc;
a2fbb9ea
ET
11160}
11161
f8ef6e44
YG
11162static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11163{
11164 int i;
11165
11166 bp->state = BNX2X_STATE_ERROR;
11167
11168 bp->rx_mode = BNX2X_RX_MODE_NONE;
11169
11170 bnx2x_netif_stop(bp, 0);
11171
11172 del_timer_sync(&bp->timer);
11173 bp->stats_state = STATS_STATE_DISABLED;
11174 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11175
11176 /* Release IRQs */
11177 bnx2x_free_irq(bp);
11178
11179 if (CHIP_IS_E1(bp)) {
11180 struct mac_configuration_cmd *config =
11181 bnx2x_sp(bp, mcast_config);
11182
8d9c5f34 11183 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11184 CAM_INVALIDATE(config->config_table[i]);
11185 }
11186
11187 /* Free SKBs, SGEs, TPA pool and driver internals */
11188 bnx2x_free_skbs(bp);
555f6c78 11189 for_each_rx_queue(bp, i)
f8ef6e44 11190 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11191 for_each_rx_queue(bp, i)
7cde1c8b 11192 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11193 bnx2x_free_mem(bp);
11194
11195 bp->state = BNX2X_STATE_CLOSED;
11196
11197 netif_carrier_off(bp->dev);
11198
11199 return 0;
11200}
11201
11202static void bnx2x_eeh_recover(struct bnx2x *bp)
11203{
11204 u32 val;
11205
11206 mutex_init(&bp->port.phy_mutex);
11207
11208 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11209 bp->link_params.shmem_base = bp->common.shmem_base;
11210 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11211
11212 if (!bp->common.shmem_base ||
11213 (bp->common.shmem_base < 0xA0000) ||
11214 (bp->common.shmem_base >= 0xC0000)) {
11215 BNX2X_DEV_INFO("MCP not active\n");
11216 bp->flags |= NO_MCP_FLAG;
11217 return;
11218 }
11219
11220 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11221 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11222 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11223 BNX2X_ERR("BAD MCP validity signature\n");
11224
11225 if (!BP_NOMCP(bp)) {
11226 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11227 & DRV_MSG_SEQ_NUMBER_MASK);
11228 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11229 }
11230}
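
The MCP-presence test above reduces to a range check: the shared-memory base
read from MISC_REG_SHARED_MEM_ADDR only counts as valid inside
[0xA0000, 0xC0000). In isolation:

#include <stdint.h>
#include <stdio.h>

static int mcp_active(uint32_t shmem_base)
{
	/* zero, below 0xA0000 or at/above 0xC0000 means no MCP */
	return shmem_base >= 0xA0000 && shmem_base < 0xC0000;
}

int main(void)
{
	/* prints "0 1 0" */
	printf("%d %d %d\n", mcp_active(0), mcp_active(0xAF000),
	       mcp_active(0xC0000));
	return 0;
}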
11231
493adb1f
WX
11232/**
11233 * bnx2x_io_error_detected - called when PCI error is detected
11234 * @pdev: Pointer to PCI device
11235 * @state: The current pci connection state
11236 *
11237 * This function is called after a PCI bus error affecting
11238 * this device has been detected.
11239 */
11240static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11241 pci_channel_state_t state)
11242{
11243 struct net_device *dev = pci_get_drvdata(pdev);
11244 struct bnx2x *bp = netdev_priv(dev);
11245
11246 rtnl_lock();
11247
11248 netif_device_detach(dev);
11249
11250 if (netif_running(dev))
f8ef6e44 11251 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11252
11253 pci_disable_device(pdev);
11254
11255 rtnl_unlock();
11256
11257 /* Request a slot reset */
11258 return PCI_ERS_RESULT_NEED_RESET;
11259}
11260
11261/**
11262 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11263 * @pdev: Pointer to PCI device
11264 *
11265 * Restart the card from scratch, as if from a cold-boot.
11266 */
11267static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11268{
11269 struct net_device *dev = pci_get_drvdata(pdev);
11270 struct bnx2x *bp = netdev_priv(dev);
11271
11272 rtnl_lock();
11273
11274 if (pci_enable_device(pdev)) {
11275 dev_err(&pdev->dev,
11276 "Cannot re-enable PCI device after reset\n");
11277 rtnl_unlock();
11278 return PCI_ERS_RESULT_DISCONNECT;
11279 }
11280
11281 pci_set_master(pdev);
11282 pci_restore_state(pdev);
11283
11284 if (netif_running(dev))
11285 bnx2x_set_power_state(bp, PCI_D0);
11286
11287 rtnl_unlock();
11288
11289 return PCI_ERS_RESULT_RECOVERED;
11290}
11291
11292/**
11293 * bnx2x_io_resume - called when traffic can start flowing again
11294 * @pdev: Pointer to PCI device
11295 *
11296 * This callback is called when the error recovery driver tells us that
11297 * its OK to resume normal operation.
11298 */
11299static void bnx2x_io_resume(struct pci_dev *pdev)
11300{
11301 struct net_device *dev = pci_get_drvdata(pdev);
11302 struct bnx2x *bp = netdev_priv(dev);
11303
11304 rtnl_lock();
11305
f8ef6e44
YG
11306 bnx2x_eeh_recover(bp);
11307
493adb1f 11308 if (netif_running(dev))
f8ef6e44 11309 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11310
11311 netif_device_attach(dev);
11312
11313 rtnl_unlock();
11314}
11315
11316static struct pci_error_handlers bnx2x_err_handler = {
11317 .error_detected = bnx2x_io_error_detected,
356e2385
EG
11318 .slot_reset = bnx2x_io_slot_reset,
11319 .resume = bnx2x_io_resume,
493adb1f
WX
11320};
11321
a2fbb9ea 11322static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11323 .name = DRV_MODULE_NAME,
11324 .id_table = bnx2x_pci_tbl,
11325 .probe = bnx2x_init_one,
11326 .remove = __devexit_p(bnx2x_remove_one),
11327 .suspend = bnx2x_suspend,
11328 .resume = bnx2x_resume,
11329 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11330};
11331
11332static int __init bnx2x_init(void)
11333{
1cf167f2
EG
11334 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11335 if (bnx2x_wq == NULL) {
11336 printk(KERN_ERR PFX "Cannot create workqueue\n");
11337 return -ENOMEM;
11338 }
11339
a2fbb9ea
ET
11340 return pci_register_driver(&bnx2x_pci_driver);
11341}
11342
11343static void __exit bnx2x_cleanup(void)
11344{
11345 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11346
11347 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11348}
11349
11350module_init(bnx2x_init);
11351module_exit(bnx2x_cleanup);
11352