/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

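/*
 * All of the above are ordinary module parameters; an illustrative load
 * line (the values here are examples only, not recommendations) would be:
 *
 *	modprobe bnx2x multi_mode=1 int_mode=2 disable_tpa=1 debug=0x10
 *
 * i.e. multi-queue on, MSI forced, TPA (LRO) off and a verbose msglevel.
 */
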
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
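
/*
 * Note on the two helpers above: they implement "indirect" register access.
 * Rather than touching a memory-mapped BAR, the target GRC address is
 * programmed through one PCI config-space window (PCICFG_GRC_ADDRESS) and
 * the data moves through another (PCICFG_GRC_DATA).  This works before the
 * BARs are usable, which is why it is restricted to init time; rewriting
 * PCICFG_VENDOR_ID_OFFSET afterwards parks the window at a harmless offset.
 */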

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
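
/*
 * Posting a DMAE command is a two-step handshake: the command structure is
 * copied word by word into the command memory slot of channel 'idx', then a
 * 1 is written to that channel's GO register so the engine starts executing
 * it.  dmae_reg_go_c[] simply maps channel numbers to their GO registers.
 */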

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
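
/*
 * Completion above is detected by polling: the DMAE engine writes
 * DMAE_COMP_VAL into the slowpath write-back word (wb_comp) when the copy
 * finishes, so the caller clears that word, posts the command, and spins
 * (up to 200 iterations) until the magic value appears.  When the engine
 * is not ready yet, the function quietly falls back to indirect writes.
 */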

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
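
/*
 * Each of the four storm processors (X/T/C/U) keeps an assert list in its
 * internal memory.  bnx2x_mc_assert() scans every list until it reads an
 * entry whose first word is COMMON_ASM_INVALID_ASSERT_OPCODE (an empty
 * slot), printing each valid entry and returning how many firmware asserts
 * were found in total.
 */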

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
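
/*
 * The crash dump above prints, per fastpath queue, a window of ring entries
 * around the current consumer positions (roughly 10 entries back and up to
 * a page forward) for the Tx BD ring, the Rx BD/SGE rings and the Rx
 * completion queue.  Statistics are disabled first so the dump does not
 * race with the statistics state machine.
 */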

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
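
/*
 * bnx2x_int_enable() programs the HC config register for whichever of the
 * three interrupt modes is active.  Note that the INTx branch writes the
 * register twice: first with the MSI/MSI-X enable bit still set, then again
 * below with that bit cleared, so the final state has only the INTx line
 * enabled.  On E1H the leading/trailing edge registers are also set up so
 * attention signals are latched for this function's vector.
 */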

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
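
/*
 * Teardown ordering in bnx2x_int_disable_sync(): bump intr_sem first so any
 * ISR that still fires returns early, optionally mask the source in the HC,
 * then synchronize_irq() every vector so in-flight handlers have finished,
 * and finally cancel/flush the slowpath work so sp_task cannot run past
 * this point.
 */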

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
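
/*
 * The status-block pattern used throughout the fast path: the chip DMAs new
 * index values into the host status block, bnx2x_update_fpsb_idx() snapshots
 * them (the barrier() stops the compiler from caching the fields), and
 * bnx2x_ack_sb() writes an igu_ack_register word back to the IGU to ack the
 * indices and, depending on 'op', re-enable the interrupt for that block.
 */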

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
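
/*
 * bnx2x_tx_avail() works on free-running 16-bit indices, so SUB_S16() gives
 * the in-flight BD count even across wrap-around.  The NUM_TX_RINGS term
 * reserves one "next page" BD per ring page, which the hardware consumes in
 * addition to the data BDs; illustratively, with prod == cons the function
 * reports tx_ring_size - NUM_TX_RINGS usable descriptors.
 */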

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
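
/*
 * The stop/wake dance at the end of bnx2x_tx_int() pairs with the
 * corresponding queue-stop check in the xmit path (not shown in this
 * section): tx_bd_cons must be globally visible (smp_mb()) before the
 * stopped test, and the wake re-checks the queue state and the
 * available-BD threshold (MAX_SKB_FRAGS + 3, enough for a worst-case
 * packet) under the Tx queue lock to avoid waking a queue another CPU is
 * about to stop.
 */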


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
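
/*
 * SGE bookkeeping: TPA aggregations can complete out of order, so
 * fp->sge_mask keeps one bit per SGE ring entry, cleared once the firmware
 * has consumed that page.  bnx2x_update_sge_prod() clears the bits listed
 * in the CQE's scatter list, then walks the 64-bit mask elements from the
 * current producer and, for every element that became all-zero, re-arms it
 * to all-ones and advances rx_sge_prod by that many entries.
 */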

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
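
/*
 * TPA life cycle: on TPA_START the per-queue bin in fp->tpa_pool takes over
 * the partially-filled skb while a spare skb from the pool is mapped into
 * the BD ring in its place.  On TPA_END (bnx2x_tpa_stop) the aggregated
 * skb gets its headers fixed up (IP checksum, optional VLAN offset), the
 * SGE pages are attached as fragments, and the packet is handed to the
 * stack; the bin is then restocked with a freshly allocated skb, or the
 * packet is dropped if that allocation fails.
 */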

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
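
/*
 * bnx2x_rx_int() demultiplexes each CQE three ways: slowpath completions go
 * to bnx2x_sp_event(), TPA start/end markers go to the TPA handlers above,
 * and everything else is a regular packet.  Because there is no jumbo ring,
 * small frames on a large-MTU device are copied into a right-sized skb and
 * the original buffer is recycled via bnx2x_reuse_rx_skb(); otherwise a
 * replacement buffer is allocated and the full-sized skb is passed up.
 */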

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1731
c18487ee 1732/* end of fast path */
a2fbb9ea 1733
bb2a0f7a 1734static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1735
c18487ee
YR
1736/* Link */
1737
1738/*
1739 * General service functions
1740 */
a2fbb9ea 1741
4a37fb66 1742static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1743{
1744 u32 lock_status;
1745 u32 resource_bit = (1 << resource);
4a37fb66
YG
1746 int func = BP_FUNC(bp);
1747 u32 hw_lock_control_reg;
c18487ee 1748 int cnt;
a2fbb9ea 1749
c18487ee
YR
1750 /* Validating that the resource is within range */
1751 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1752 DP(NETIF_MSG_HW,
1753 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1754 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1755 return -EINVAL;
1756 }
a2fbb9ea 1757
4a37fb66
YG
1758 if (func <= 5) {
1759 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1760 } else {
1761 hw_lock_control_reg =
1762 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1763 }
1764
c18487ee 1765 /* Validating that the resource is not already taken */
4a37fb66 1766 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1767 if (lock_status & resource_bit) {
1768 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1769 lock_status, resource_bit);
1770 return -EEXIST;
1771 }
a2fbb9ea 1772
46230476
EG
 1773	/* Try for 5 seconds, every 5 ms */
1774 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1775 /* Try to acquire the lock */
4a37fb66
YG
1776 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1777 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1778 if (lock_status & resource_bit)
1779 return 0;
a2fbb9ea 1780
c18487ee 1781 msleep(5);
a2fbb9ea 1782 }
c18487ee
YR
1783 DP(NETIF_MSG_HW, "Timeout\n");
1784 return -EAGAIN;
1785}
a2fbb9ea 1786
4a37fb66 1787static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1788{
1789 u32 lock_status;
1790 u32 resource_bit = (1 << resource);
4a37fb66
YG
1791 int func = BP_FUNC(bp);
1792 u32 hw_lock_control_reg;
a2fbb9ea 1793
c18487ee
YR
1794 /* Validating that the resource is within range */
1795 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1796 DP(NETIF_MSG_HW,
1797 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1798 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1799 return -EINVAL;
1800 }
1801
4a37fb66
YG
1802 if (func <= 5) {
1803 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1804 } else {
1805 hw_lock_control_reg =
1806 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1807 }
1808
c18487ee 1809 /* Validating that the resource is currently taken */
4a37fb66 1810 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1811 if (!(lock_status & resource_bit)) {
1812 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1813 lock_status, resource_bit);
1814 return -EFAULT;
a2fbb9ea
ET
1815 }
1816
4a37fb66 1817 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1818 return 0;
1819}
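/* Editor's note: a minimal usage sketch of the two helpers above; the
 * wrapper name is ours, not the driver's. The acquire loop retries up
 * to 1000 times with a 5 ms sleep, so it gives up after roughly 5
 * seconds and returns -EAGAIN.
 */
static void example_with_hw_lock(struct bnx2x *bp)
{
	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO))
		return;		/* -EINVAL, -EEXIST or -EAGAIN */

	/* ... touch the shared hardware resource here ... */

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
}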
1820
1821/* HW Lock for shared dual port PHYs */
4a37fb66 1822static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1823{
34f80b04 1824 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1825
46c6a674
EG
1826 if (bp->port.need_hw_lock)
1827 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1828}
a2fbb9ea 1829
4a37fb66 1830static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1831{
46c6a674
EG
1832 if (bp->port.need_hw_lock)
1833 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1834
34f80b04 1835 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1836}
a2fbb9ea 1837
4acac6a5
EG
1838int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1839{
1840 /* The GPIO should be swapped if swap register is set and active */
1841 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1842 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1843 int gpio_shift = gpio_num +
1844 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1845 u32 gpio_mask = (1 << gpio_shift);
1846 u32 gpio_reg;
1847 int value;
1848
1849 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1850 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1851 return -EINVAL;
1852 }
1853
1854 /* read GPIO value */
1855 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1856
1857 /* get the requested pin value */
1858 if ((gpio_reg & gpio_mask) == gpio_mask)
1859 value = 1;
1860 else
1861 value = 0;
1862
1863 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1864
1865 return value;
1866}
1867
17de50b7 1868int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1869{
1870 /* The GPIO should be swapped if swap register is set and active */
1871 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1872 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1873 int gpio_shift = gpio_num +
1874 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1875 u32 gpio_mask = (1 << gpio_shift);
1876 u32 gpio_reg;
a2fbb9ea 1877
c18487ee
YR
1878 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1879 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1880 return -EINVAL;
1881 }
a2fbb9ea 1882
4a37fb66 1883 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
 1884	/* read GPIO and mask off all but the float bits */
1885 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1886
c18487ee
YR
1887 switch (mode) {
1888 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1889 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1890 gpio_num, gpio_shift);
1891 /* clear FLOAT and set CLR */
1892 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1893 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1894 break;
a2fbb9ea 1895
c18487ee
YR
1896 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1897 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1898 gpio_num, gpio_shift);
1899 /* clear FLOAT and set SET */
1900 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1901 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1902 break;
a2fbb9ea 1903
17de50b7 1904 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1905 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1906 gpio_num, gpio_shift);
1907 /* set FLOAT */
1908 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1909 break;
a2fbb9ea 1910
c18487ee
YR
1911 default:
1912 break;
a2fbb9ea
ET
1913 }
1914
c18487ee 1915 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1916 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1917
c18487ee 1918 return 0;
a2fbb9ea
ET
1919}
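/* Editor's note: standalone sketch -- not driver code -- of the register
 * layout manipulated above: one 32-bit register carries SET, CLR and
 * FLOAT fields, each holding one bit per pin, at different offsets. The
 * DEMO_* offsets are made up; the real ones are the
 * MISC_REGISTERS_GPIO_*_POS constants.
 */
#include <stdio.h>

#define DEMO_SET_POS	0
#define DEMO_CLR_POS	8
#define DEMO_FLOAT_POS	16

int main(void)
{
	unsigned int gpio_mask = 1u << 2;		/* pin 2 */
	unsigned int reg = 0xffu << DEMO_FLOAT_POS;	/* all pins floating */

	/* drive pin 2 low: clear FLOAT, set CLR (mirrors OUTPUT_LOW) */
	reg &= ~(gpio_mask << DEMO_FLOAT_POS);
	reg |= (gpio_mask << DEMO_CLR_POS);

	printf("reg = 0x%08x\n", reg);	/* prints reg = 0x00fb0400 */
	return 0;
}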
1920
4acac6a5
EG
1921int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1922{
1923 /* The GPIO should be swapped if swap register is set and active */
1924 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1925 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1926 int gpio_shift = gpio_num +
1927 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1928 u32 gpio_mask = (1 << gpio_shift);
1929 u32 gpio_reg;
1930
1931 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1932 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1933 return -EINVAL;
1934 }
1935
1936 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1937 /* read GPIO int */
1938 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1939
1940 switch (mode) {
1941 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1942 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1943 "output low\n", gpio_num, gpio_shift);
1944 /* clear SET and set CLR */
1945 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1946 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1947 break;
1948
1949 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1950 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1951 "output high\n", gpio_num, gpio_shift);
1952 /* clear CLR and set SET */
1953 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1954 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1955 break;
1956
1957 default:
1958 break;
1959 }
1960
1961 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1962 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963
1964 return 0;
1965}
1966
c18487ee 1967static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1968{
c18487ee
YR
1969 u32 spio_mask = (1 << spio_num);
1970 u32 spio_reg;
a2fbb9ea 1971
c18487ee
YR
1972 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1973 (spio_num > MISC_REGISTERS_SPIO_7)) {
1974 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1975 return -EINVAL;
a2fbb9ea
ET
1976 }
1977
4a37fb66 1978 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
 1979	/* read SPIO and mask off all but the float bits */
1980 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1981
c18487ee 1982 switch (mode) {
6378c025 1983 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1984 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1985 /* clear FLOAT and set CLR */
1986 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1987 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1988 break;
a2fbb9ea 1989
6378c025 1990 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1991 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1992 /* clear FLOAT and set SET */
1993 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1994 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1995 break;
a2fbb9ea 1996
c18487ee
YR
1997 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1998 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1999 /* set FLOAT */
2000 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2001 break;
a2fbb9ea 2002
c18487ee
YR
2003 default:
2004 break;
a2fbb9ea
ET
2005 }
2006
c18487ee 2007 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2008 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2009
a2fbb9ea
ET
2010 return 0;
2011}
2012
c18487ee 2013static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2014{
ad33ea3a
EG
2015 switch (bp->link_vars.ieee_fc &
2016 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2017 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2018 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2019 ADVERTISED_Pause);
2020 break;
2021 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2022 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2023 ADVERTISED_Pause);
2024 break;
2025 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2026 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
2027 break;
2028 default:
34f80b04 2029 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2030 ADVERTISED_Pause);
2031 break;
2032 }
2033}
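/* Editor's note: the mapping implemented above, summarized. The IEEE
 * 802.3 (Annex 28B) pause advertisement resolves to the two ethtool
 * advertising flags as follows:
 *
 *   ieee_fc advertisement   ADVERTISED_Pause   ADVERTISED_Asym_Pause
 *   NONE                    0                  0
 *   BOTH                    1                  1
 *   ASYMMETRIC              0                  1
 *   anything else           0                  0
 */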
f1410647 2034
c18487ee
YR
2035static void bnx2x_link_report(struct bnx2x *bp)
2036{
2037 if (bp->link_vars.link_up) {
2038 if (bp->state == BNX2X_STATE_OPEN)
2039 netif_carrier_on(bp->dev);
2040 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2041
c18487ee 2042 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2043
c18487ee
YR
2044 if (bp->link_vars.duplex == DUPLEX_FULL)
2045 printk("full duplex");
2046 else
2047 printk("half duplex");
f1410647 2048
c0700f90
DM
2049 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2050 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2051 printk(", receive ");
c0700f90 2052 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2053 printk("& transmit ");
2054 } else {
2055 printk(", transmit ");
2056 }
2057 printk("flow control ON");
2058 }
2059 printk("\n");
f1410647 2060
c18487ee
YR
2061 } else { /* link_down */
2062 netif_carrier_off(bp->dev);
2063 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2064 }
c18487ee
YR
2065}
2066
b5bf9068 2067static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2068{
19680c48
EG
2069 if (!BP_NOMCP(bp)) {
2070 u8 rc;
a2fbb9ea 2071
19680c48 2072 /* Initialize link parameters structure variables */
8c99e7b0
YR
2073 /* It is recommended to turn off RX FC for jumbo frames
2074 for better performance */
2075 if (IS_E1HMF(bp))
c0700f90 2076 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2077 else if (bp->dev->mtu > 5000)
c0700f90 2078 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2079 else
c0700f90 2080 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2081
4a37fb66 2082 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2083
2084 if (load_mode == LOAD_DIAG)
2085 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2086
19680c48 2087 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2088
4a37fb66 2089 bnx2x_release_phy_lock(bp);
a2fbb9ea 2090
3c96c68b
EG
2091 bnx2x_calc_fc_adv(bp);
2092
b5bf9068
EG
2093 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2094 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2095 bnx2x_link_report(bp);
b5bf9068 2096 }
34f80b04 2097
19680c48
EG
2098 return rc;
2099 }
 2100	BNX2X_ERR("Bootcode is missing - not initializing link\n");
2101 return -EINVAL;
a2fbb9ea
ET
2102}
2103
c18487ee 2104static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2105{
19680c48 2106 if (!BP_NOMCP(bp)) {
4a37fb66 2107 bnx2x_acquire_phy_lock(bp);
19680c48 2108 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2109 bnx2x_release_phy_lock(bp);
a2fbb9ea 2110
19680c48
EG
2111 bnx2x_calc_fc_adv(bp);
2112 } else
 2113		BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2114}
a2fbb9ea 2115
c18487ee
YR
2116static void bnx2x__link_reset(struct bnx2x *bp)
2117{
19680c48 2118 if (!BP_NOMCP(bp)) {
4a37fb66 2119 bnx2x_acquire_phy_lock(bp);
589abe3a 2120 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2121 bnx2x_release_phy_lock(bp);
19680c48
EG
2122 } else
 2123		BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2124}
a2fbb9ea 2125
c18487ee
YR
2126static u8 bnx2x_link_test(struct bnx2x *bp)
2127{
2128 u8 rc;
a2fbb9ea 2129
4a37fb66 2130 bnx2x_acquire_phy_lock(bp);
c18487ee 2131 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2132 bnx2x_release_phy_lock(bp);
a2fbb9ea 2133
c18487ee
YR
2134 return rc;
2135}
a2fbb9ea 2136
8a1c38d1 2137static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2138{
8a1c38d1
EG
2139 u32 r_param = bp->link_vars.line_speed / 8;
2140 u32 fair_periodic_timeout_usec;
2141 u32 t_fair;
34f80b04 2142
8a1c38d1
EG
2143 memset(&(bp->cmng.rs_vars), 0,
2144 sizeof(struct rate_shaping_vars_per_port));
2145 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2146
8a1c38d1
EG
2147 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2148 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2149
8a1c38d1
EG
 2150	/* this is the threshold below which no timer arming will occur.
 2151	   The 1.25 coefficient makes the threshold a little bigger
 2152	   than the real time, to compensate for timer inaccuracy */
2153 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2154 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2155
8a1c38d1
EG
2156 /* resolution of fairness timer */
2157 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
 2158	/* for 10G it is 1000 usec, for 1G it is 10000 usec */
2159 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2160
8a1c38d1
EG
2161 /* this is the threshold below which we won't arm the timer anymore */
2162 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2163
8a1c38d1
EG
2164 /* we multiply by 1e3/8 to get bytes/msec.
 2165	   We don't want the credits to exceed
 2166	   t_fair*FAIR_MEM (the algorithm resolution) */
2167 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2168 /* since each tick is 4 usec */
2169 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2170}
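/* Editor's note: a worked instance of the arithmetic above, assuming a
 * 10000 Mbps link. The in-line comment says T_FAIR_COEF/line_speed is
 * 1000 usec at 10G, which implies T_FAIR_COEF is 10^7; the remaining
 * constants are left symbolic.
 *
 *   r_param      = 10000 / 8 = 1250 bytes/usec
 *   rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4
 *                  (1.25 x the bytes one period can carry)
 *   t_fair       = 10^7 / 10000 = 1000 usec
 *   fairness_timeout = (QM_ARB_BYTES / 1250) / 4 SDM ticks
 */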
2171
8a1c38d1 2172static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2173{
2174 struct rate_shaping_vars_per_vn m_rs_vn;
2175 struct fairness_vars_per_vn m_fair_vn;
2176 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2177 u16 vn_min_rate, vn_max_rate;
2178 int i;
2179
2180 /* If function is hidden - set min and max to zeroes */
2181 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2182 vn_min_rate = 0;
2183 vn_max_rate = 0;
2184
2185 } else {
2186 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2187 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2188		/* If fairness is enabled (not all min rates are zero) and
34f80b04 2189		   the current min rate is zero - set it to 1.
33471629 2190 This is a requirement of the algorithm. */
8a1c38d1 2191 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2192 vn_min_rate = DEF_MIN_RATE;
2193 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2194 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2195 }
2196
8a1c38d1
EG
2197 DP(NETIF_MSG_IFUP,
2198 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2199 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2200
2201 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2202 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2203
2204 /* global vn counter - maximal Mbps for this vn */
2205 m_rs_vn.vn_counter.rate = vn_max_rate;
2206
2207 /* quota - number of bytes transmitted in this period */
2208 m_rs_vn.vn_counter.quota =
2209 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2210
8a1c38d1 2211 if (bp->vn_weight_sum) {
34f80b04
EG
2212 /* credit for each period of the fairness algorithm:
 2213		   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2214 vn_weight_sum should not be larger than 10000, thus
2215 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2216 than zero */
34f80b04 2217 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2218 max((u32)(vn_min_rate * (T_FAIR_COEF /
2219 (8 * bp->vn_weight_sum))),
2220 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2221 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2222 m_fair_vn.vn_credit_delta);
2223 }
2224
34f80b04
EG
2225 /* Store it to internal memory */
2226 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2227 REG_WR(bp, BAR_XSTRORM_INTMEM +
2228 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2229 ((u32 *)(&m_rs_vn))[i]);
2230
2231 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2232 REG_WR(bp, BAR_XSTRORM_INTMEM +
2233 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2234 ((u32 *)(&m_fair_vn))[i]);
2235}
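/* Editor's note: a worked example of the quota computed above. The
 * MIN/MAX bandwidth config fields are stored in units of 100 Mbps,
 * hence the "* 100". Since a rate of 1 Mbps is exactly 1 bit per
 * microsecond, a vn with vn_max_rate = 2500 Mbps gets
 *
 *   quota = 2500 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes
 *
 * to transmit in each rate-shaping period.
 */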
2236
8a1c38d1 2237
c18487ee
YR
2238/* This function is called upon link interrupt */
2239static void bnx2x_link_attn(struct bnx2x *bp)
2240{
bb2a0f7a
YG
2241 /* Make sure that we are synced with the current statistics */
2242 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2243
c18487ee 2244 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2245
bb2a0f7a
YG
2246 if (bp->link_vars.link_up) {
2247
1c06328c
EG
2248 /* dropless flow control */
2249 if (CHIP_IS_E1H(bp)) {
2250 int port = BP_PORT(bp);
2251 u32 pause_enabled = 0;
2252
2253 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2254 pause_enabled = 1;
2255
2256 REG_WR(bp, BAR_USTRORM_INTMEM +
2257 USTORM_PAUSE_ENABLED_OFFSET(port),
2258 pause_enabled);
2259 }
2260
bb2a0f7a
YG
2261 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2262 struct host_port_stats *pstats;
2263
2264 pstats = bnx2x_sp(bp, port_stats);
2265 /* reset old bmac stats */
2266 memset(&(pstats->mac_stx[0]), 0,
2267 sizeof(struct mac_stx));
2268 }
2269 if ((bp->state == BNX2X_STATE_OPEN) ||
2270 (bp->state == BNX2X_STATE_DISABLED))
2271 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2272 }
2273
c18487ee
YR
2274 /* indicate link status */
2275 bnx2x_link_report(bp);
34f80b04
EG
2276
2277 if (IS_E1HMF(bp)) {
8a1c38d1 2278 int port = BP_PORT(bp);
34f80b04 2279 int func;
8a1c38d1 2280 int vn;
34f80b04
EG
2281
2282 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2283 if (vn == BP_E1HVN(bp))
2284 continue;
2285
8a1c38d1 2286 func = ((vn << 1) | port);
34f80b04
EG
2287
2288 /* Set the attention towards other drivers
2289 on the same port */
2290 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2291 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2292 }
34f80b04 2293
8a1c38d1
EG
2294 if (bp->link_vars.link_up) {
2295 int i;
2296
2297 /* Init rate shaping and fairness contexts */
2298 bnx2x_init_port_minmax(bp);
34f80b04 2299
34f80b04 2300 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2301 bnx2x_init_vn_minmax(bp, 2*vn + port);
2302
2303 /* Store it to internal memory */
2304 for (i = 0;
2305 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2306 REG_WR(bp, BAR_XSTRORM_INTMEM +
2307 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2308 ((u32 *)(&bp->cmng))[i]);
2309 }
34f80b04 2310 }
c18487ee 2311}
a2fbb9ea 2312
c18487ee
YR
2313static void bnx2x__link_status_update(struct bnx2x *bp)
2314{
2315 if (bp->state != BNX2X_STATE_OPEN)
2316 return;
a2fbb9ea 2317
c18487ee 2318 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2319
bb2a0f7a
YG
2320 if (bp->link_vars.link_up)
2321 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2322 else
2323 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2324
c18487ee
YR
2325 /* indicate link status */
2326 bnx2x_link_report(bp);
a2fbb9ea 2327}
a2fbb9ea 2328
34f80b04
EG
2329static void bnx2x_pmf_update(struct bnx2x *bp)
2330{
2331 int port = BP_PORT(bp);
2332 u32 val;
2333
2334 bp->port.pmf = 1;
2335 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2336
2337 /* enable nig attention */
2338 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2339 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2340 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2341
2342 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2343}
2344
c18487ee 2345/* end of Link */
a2fbb9ea
ET
2346
2347/* slow path */
2348
2349/*
2350 * General service functions
2351 */
2352
2353/* the slow path queue is odd since completions arrive on the fastpath ring */
2354static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2355 u32 data_hi, u32 data_lo, int common)
2356{
34f80b04 2357 int func = BP_FUNC(bp);
a2fbb9ea 2358
34f80b04
EG
2359 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2360 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2361 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2362 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2363 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2364
2365#ifdef BNX2X_STOP_ON_ERROR
2366 if (unlikely(bp->panic))
2367 return -EIO;
2368#endif
2369
34f80b04 2370 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2371
2372 if (!bp->spq_left) {
2373 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2374 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2375 bnx2x_panic();
2376 return -EBUSY;
2377 }
f1410647 2378
a2fbb9ea
ET
 2379	/* CID needs the port number to be encoded in it */
2380 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2381 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2382 HW_CID(bp, cid)));
2383 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2384 if (common)
2385 bp->spq_prod_bd->hdr.type |=
2386 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2387
2388 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2389 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2390
2391 bp->spq_left--;
2392
2393 if (bp->spq_prod_bd == bp->spq_last_bd) {
2394 bp->spq_prod_bd = bp->spq;
2395 bp->spq_prod_idx = 0;
2396 DP(NETIF_MSG_TIMER, "end of spq\n");
2397
2398 } else {
2399 bp->spq_prod_bd++;
2400 bp->spq_prod_idx++;
2401 }
2402
34f80b04 2403 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2404 bp->spq_prod_idx);
2405
34f80b04 2406 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2407 return 0;
2408}
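/* Editor's note: standalone sketch -- not driver code -- of the producer
 * bookkeeping at the end of bnx2x_sp_post(): the producer walks the BD
 * ring and snaps back to the base at the last element, while spq_left
 * throttles how many slots may be outstanding.
 */
#include <stdio.h>

#define SPQ_SIZE 8

int main(void)
{
	int bd[SPQ_SIZE];	/* stand-in for the BD ring */
	int *prod = bd, *last = &bd[SPQ_SIZE - 1];
	unsigned int prod_idx = 0, left = SPQ_SIZE, i;

	for (i = 0; i < 10; i++) {	/* try to post 10 entries */
		if (!left) {
			printf("ring full at post %u\n", i);
			break;
		}
		left--;
		if (prod == last) {
			prod = bd;	/* wrap, as in the driver */
			prod_idx = 0;
		} else {
			prod++;
			prod_idx++;
		}
	}
	printf("prod_idx %u, left %u\n", prod_idx, left);
	return 0;
}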
2409
2410/* acquire split MCP access lock register */
4a37fb66 2411static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2412{
a2fbb9ea 2413 u32 i, j, val;
34f80b04 2414 int rc = 0;
a2fbb9ea
ET
2415
2416 might_sleep();
2417 i = 100;
2418 for (j = 0; j < i*10; j++) {
2419 val = (1UL << 31);
2420 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2421 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2422 if (val & (1L << 31))
2423 break;
2424
2425 msleep(5);
2426 }
a2fbb9ea 2427 if (!(val & (1L << 31))) {
19680c48 2428 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2429 rc = -EBUSY;
2430 }
2431
2432 return rc;
2433}
2434
4a37fb66
YG
2435/* release split MCP access lock register */
2436static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2437{
2438 u32 val = 0;
2439
2440 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2441}
2442
2443static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2444{
2445 struct host_def_status_block *def_sb = bp->def_status_blk;
2446 u16 rc = 0;
2447
2448 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2449 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2450 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2451 rc |= 1;
2452 }
2453 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2454 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2455 rc |= 2;
2456 }
2457 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2458 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2459 rc |= 4;
2460 }
2461 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2462 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2463 rc |= 8;
2464 }
2465 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2466 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2467 rc |= 16;
2468 }
2469 return rc;
2470}
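/* Editor's note: the return value above is a bitmask of which default
 * status block indices moved: 1 = attention bits, 2 = CSTORM,
 * 4 = USTORM, 8 = XSTORM, 16 = TSTORM. A minimal consumer, mirroring
 * what bnx2x_sp_task() does further down (the function name here is
 * ours):
 */
static void example_dsb_consumer(struct bnx2x *bp)
{
	u16 updated = bnx2x_update_dsb_idx(bp);

	if (updated & 0x1)	/* attention bits index moved */
		DP(NETIF_MSG_INTR, "HW attention pending\n");
}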
2471
2472/*
2473 * slow path service functions
2474 */
2475
2476static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2477{
34f80b04 2478 int port = BP_PORT(bp);
5c862848
EG
2479 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2480 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2481 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2482 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2483 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2484 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2485 u32 aeu_mask;
87942b46 2486 u32 nig_mask = 0;
a2fbb9ea 2487
a2fbb9ea
ET
2488 if (bp->attn_state & asserted)
2489 BNX2X_ERR("IGU ERROR\n");
2490
3fcaf2e5
EG
2491 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2492 aeu_mask = REG_RD(bp, aeu_addr);
2493
a2fbb9ea 2494 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2495 aeu_mask, asserted);
2496 aeu_mask &= ~(asserted & 0xff);
2497 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2498
3fcaf2e5
EG
2499 REG_WR(bp, aeu_addr, aeu_mask);
2500 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2501
3fcaf2e5 2502 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2503 bp->attn_state |= asserted;
3fcaf2e5 2504 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2505
2506 if (asserted & ATTN_HARD_WIRED_MASK) {
2507 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2508
a5e9a7cf
EG
2509 bnx2x_acquire_phy_lock(bp);
2510
877e9aa4 2511 /* save nig interrupt mask */
87942b46 2512 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2513 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2514
c18487ee 2515 bnx2x_link_attn(bp);
a2fbb9ea
ET
2516
2517 /* handle unicore attn? */
2518 }
2519 if (asserted & ATTN_SW_TIMER_4_FUNC)
2520 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2521
2522 if (asserted & GPIO_2_FUNC)
2523 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2524
2525 if (asserted & GPIO_3_FUNC)
2526 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2527
2528 if (asserted & GPIO_4_FUNC)
2529 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2530
2531 if (port == 0) {
2532 if (asserted & ATTN_GENERAL_ATTN_1) {
2533 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2534 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2535 }
2536 if (asserted & ATTN_GENERAL_ATTN_2) {
2537 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2538 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2539 }
2540 if (asserted & ATTN_GENERAL_ATTN_3) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2543 }
2544 } else {
2545 if (asserted & ATTN_GENERAL_ATTN_4) {
2546 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2547 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2548 }
2549 if (asserted & ATTN_GENERAL_ATTN_5) {
2550 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2551 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2552 }
2553 if (asserted & ATTN_GENERAL_ATTN_6) {
2554 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2555 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2556 }
2557 }
2558
2559 } /* if hardwired */
2560
5c862848
EG
2561 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2562 asserted, hc_addr);
2563 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2564
2565 /* now set back the mask */
a5e9a7cf 2566 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2567 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2568 bnx2x_release_phy_lock(bp);
2569 }
a2fbb9ea
ET
2570}
2571
877e9aa4 2572static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2573{
34f80b04 2574 int port = BP_PORT(bp);
877e9aa4
ET
2575 int reg_offset;
2576 u32 val;
2577
34f80b04
EG
2578 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2579 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2580
34f80b04 2581 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2582
2583 val = REG_RD(bp, reg_offset);
2584 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2585 REG_WR(bp, reg_offset, val);
2586
2587 BNX2X_ERR("SPIO5 hw attention\n");
2588
35b19ba5
EG
2589 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2591 /* Fan failure attention */
2592
17de50b7 2593 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2594 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2595 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2596 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2597 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2598 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2599 /* mark the failure */
c18487ee 2600 bp->link_params.ext_phy_config &=
877e9aa4 2601 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2602 bp->link_params.ext_phy_config |=
877e9aa4
ET
2603 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2604 SHMEM_WR(bp,
2605 dev_info.port_hw_config[port].
2606 external_phy_config,
c18487ee 2607 bp->link_params.ext_phy_config);
877e9aa4
ET
2608 /* log the failure */
2609 printk(KERN_ERR PFX "Fan Failure on Network"
2610 " Controller %s has caused the driver to"
2611 " shutdown the card to prevent permanent"
2612 " damage. Please contact Dell Support for"
2613 " assistance\n", bp->dev->name);
2614 break;
2615
2616 default:
2617 break;
2618 }
2619 }
34f80b04 2620
589abe3a
EG
2621 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2622 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2623 bnx2x_acquire_phy_lock(bp);
2624 bnx2x_handle_module_detect_int(&bp->link_params);
2625 bnx2x_release_phy_lock(bp);
2626 }
2627
34f80b04
EG
2628 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2629
2630 val = REG_RD(bp, reg_offset);
2631 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2632 REG_WR(bp, reg_offset, val);
2633
2634 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2635 (attn & HW_INTERRUT_ASSERT_SET_0));
2636 bnx2x_panic();
2637 }
877e9aa4
ET
2638}
2639
2640static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2641{
2642 u32 val;
2643
0626b899 2644 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2645
2646 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2647 BNX2X_ERR("DB hw attention 0x%x\n", val);
2648 /* DORQ discard attention */
2649 if (val & 0x2)
2650 BNX2X_ERR("FATAL error from DORQ\n");
2651 }
34f80b04
EG
2652
2653 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2654
2655 int port = BP_PORT(bp);
2656 int reg_offset;
2657
2658 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2659 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2660
2661 val = REG_RD(bp, reg_offset);
2662 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2663 REG_WR(bp, reg_offset, val);
2664
2665 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2666 (attn & HW_INTERRUT_ASSERT_SET_1));
2667 bnx2x_panic();
2668 }
877e9aa4
ET
2669}
2670
2671static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2672{
2673 u32 val;
2674
2675 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2676
2677 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2678 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2679 /* CFC error attention */
2680 if (val & 0x2)
2681 BNX2X_ERR("FATAL error from CFC\n");
2682 }
2683
2684 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2685
2686 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2687 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2688 /* RQ_USDMDP_FIFO_OVERFLOW */
2689 if (val & 0x18000)
2690 BNX2X_ERR("FATAL error from PXP\n");
2691 }
34f80b04
EG
2692
2693 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2694
2695 int port = BP_PORT(bp);
2696 int reg_offset;
2697
2698 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2699 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2700
2701 val = REG_RD(bp, reg_offset);
2702 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2703 REG_WR(bp, reg_offset, val);
2704
2705 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2706 (attn & HW_INTERRUT_ASSERT_SET_2));
2707 bnx2x_panic();
2708 }
877e9aa4
ET
2709}
2710
2711static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2712{
34f80b04
EG
2713 u32 val;
2714
877e9aa4
ET
2715 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2716
34f80b04
EG
2717 if (attn & BNX2X_PMF_LINK_ASSERT) {
2718 int func = BP_FUNC(bp);
2719
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2721 bnx2x__link_status_update(bp);
2722 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2723 DRV_STATUS_PMF)
2724 bnx2x_pmf_update(bp);
2725
2726 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2727
2728 BNX2X_ERR("MC assert!\n");
2729 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2731 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2732 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2733 bnx2x_panic();
2734
2735 } else if (attn & BNX2X_MCP_ASSERT) {
2736
2737 BNX2X_ERR("MCP assert!\n");
2738 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2739 bnx2x_fw_dump(bp);
877e9aa4
ET
2740
2741 } else
2742 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2743 }
2744
2745 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2746 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2747 if (attn & BNX2X_GRC_TIMEOUT) {
2748 val = CHIP_IS_E1H(bp) ?
2749 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2750 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2751 }
2752 if (attn & BNX2X_GRC_RSV) {
2753 val = CHIP_IS_E1H(bp) ?
2754 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2755 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2756 }
877e9aa4 2757 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2758 }
2759}
2760
2761static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2762{
a2fbb9ea
ET
2763 struct attn_route attn;
2764 struct attn_route group_mask;
34f80b04 2765 int port = BP_PORT(bp);
877e9aa4 2766 int index;
a2fbb9ea
ET
2767 u32 reg_addr;
2768 u32 val;
3fcaf2e5 2769 u32 aeu_mask;
a2fbb9ea
ET
2770
2771 /* need to take HW lock because MCP or other port might also
2772 try to handle this event */
4a37fb66 2773 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2774
2775 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2776 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2777 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2778 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2779 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2780 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2781
2782 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2783 if (deasserted & (1 << index)) {
2784 group_mask = bp->attn_group[index];
2785
34f80b04
EG
2786 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2787 index, group_mask.sig[0], group_mask.sig[1],
2788 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2789
877e9aa4
ET
2790 bnx2x_attn_int_deasserted3(bp,
2791 attn.sig[3] & group_mask.sig[3]);
2792 bnx2x_attn_int_deasserted1(bp,
2793 attn.sig[1] & group_mask.sig[1]);
2794 bnx2x_attn_int_deasserted2(bp,
2795 attn.sig[2] & group_mask.sig[2]);
2796 bnx2x_attn_int_deasserted0(bp,
2797 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2798
a2fbb9ea
ET
2799 if ((attn.sig[0] & group_mask.sig[0] &
2800 HW_PRTY_ASSERT_SET_0) ||
2801 (attn.sig[1] & group_mask.sig[1] &
2802 HW_PRTY_ASSERT_SET_1) ||
2803 (attn.sig[2] & group_mask.sig[2] &
2804 HW_PRTY_ASSERT_SET_2))
6378c025 2805 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2806 }
2807 }
2808
4a37fb66 2809 bnx2x_release_alr(bp);
a2fbb9ea 2810
5c862848 2811 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2812
2813 val = ~deasserted;
3fcaf2e5
EG
2814 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2815 val, reg_addr);
5c862848 2816 REG_WR(bp, reg_addr, val);
a2fbb9ea 2817
a2fbb9ea 2818 if (~bp->attn_state & deasserted)
3fcaf2e5 2819 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2820
2821 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2822 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2823
3fcaf2e5
EG
2824 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2825 aeu_mask = REG_RD(bp, reg_addr);
2826
2827 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2828 aeu_mask, deasserted);
2829 aeu_mask |= (deasserted & 0xff);
2830 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2831
3fcaf2e5
EG
2832 REG_WR(bp, reg_addr, aeu_mask);
2833 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2834
2835 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2836 bp->attn_state &= ~deasserted;
2837 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2838}
2839
2840static void bnx2x_attn_int(struct bnx2x *bp)
2841{
2842 /* read local copy of bits */
68d59484
EG
2843 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2844 attn_bits);
2845 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2846 attn_bits_ack);
a2fbb9ea
ET
2847 u32 attn_state = bp->attn_state;
2848
2849 /* look for changed bits */
2850 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2851 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2852
2853 DP(NETIF_MSG_HW,
2854 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2855 attn_bits, attn_ack, asserted, deasserted);
2856
2857 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2858 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2859
2860 /* handle bits that were raised */
2861 if (asserted)
2862 bnx2x_attn_int_asserted(bp, asserted);
2863
2864 if (deasserted)
2865 bnx2x_attn_int_deasserted(bp, deasserted);
2866}
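/* Editor's note: standalone worked example -- not driver code -- of the
 * asserted/deasserted derivation above. A bit is newly asserted when
 * the chip reports it but it is neither acked nor in our state; newly
 * deasserted when the chip dropped it while we still track it as set.
 */
#include <stdio.h>

int main(void)
{
	unsigned int attn_bits = 0x6, attn_ack = 0x4, attn_state = 0x4;
	unsigned int asserted = attn_bits & ~attn_ack & ~attn_state;
	unsigned int deasserted = ~attn_bits & attn_ack & attn_state;

	/* prints: asserted 0x2 deasserted 0x0 */
	printf("asserted 0x%x deasserted 0x%x\n", asserted, deasserted);
	return 0;
}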
2867
2868static void bnx2x_sp_task(struct work_struct *work)
2869{
1cf167f2 2870 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2871 u16 status;
2872
34f80b04 2873
a2fbb9ea
ET
2874 /* Return here if interrupt is disabled */
2875 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2876 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2877 return;
2878 }
2879
2880 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2881/* if (status == 0) */
2882/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2883
3196a88a 2884 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2885
877e9aa4
ET
2886 /* HW attentions */
2887 if (status & 0x1)
a2fbb9ea 2888 bnx2x_attn_int(bp);
a2fbb9ea 2889
68d59484 2890 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2891 IGU_INT_NOP, 1);
2892 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2893 IGU_INT_NOP, 1);
2894 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2895 IGU_INT_NOP, 1);
2896 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2897 IGU_INT_NOP, 1);
2898 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2899 IGU_INT_ENABLE, 1);
877e9aa4 2900
a2fbb9ea
ET
2901}
2902
2903static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2904{
2905 struct net_device *dev = dev_instance;
2906 struct bnx2x *bp = netdev_priv(dev);
2907
2908 /* Return here if interrupt is disabled */
2909 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2910 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2911 return IRQ_HANDLED;
2912 }
2913
8d9c5f34 2914 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2915
2916#ifdef BNX2X_STOP_ON_ERROR
2917 if (unlikely(bp->panic))
2918 return IRQ_HANDLED;
2919#endif
2920
1cf167f2 2921 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2922
2923 return IRQ_HANDLED;
2924}
2925
2926/* end of slow path */
2927
2928/* Statistics */
2929
2930/****************************************************************************
2931* Macros
2932****************************************************************************/
2933
a2fbb9ea
ET
2934/* sum[hi:lo] += add[hi:lo] */
2935#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2936 do { \
2937 s_lo += a_lo; \
f5ba6772 2938 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2939 } while (0)
2940
2941/* difference = minuend - subtrahend */
2942#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2943 do { \
bb2a0f7a
YG
2944 if (m_lo < s_lo) { \
2945 /* underflow */ \
a2fbb9ea 2946 d_hi = m_hi - s_hi; \
bb2a0f7a 2947 if (d_hi > 0) { \
6378c025 2948			/* we can 'borrow' 1 */ \
a2fbb9ea
ET
2949 d_hi--; \
2950 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2951 } else { \
6378c025 2952 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2953 d_hi = 0; \
2954 d_lo = 0; \
2955 } \
bb2a0f7a
YG
2956 } else { \
2957 /* m_lo >= s_lo */ \
a2fbb9ea 2958 if (m_hi < s_hi) { \
bb2a0f7a
YG
2959 d_hi = 0; \
2960 d_lo = 0; \
2961 } else { \
6378c025 2962 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2963 d_hi = m_hi - s_hi; \
2964 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2965 } \
2966 } \
2967 } while (0)
2968
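/* Editor's note: standalone check -- not driver code -- of ADD_64 on a
 * carry boundary. The statistics blocks keep 64-bit counters as {hi,lo}
 * u32 pairs because the hardware exports them that way; the macro is
 * copied verbatim from above.
 */
#include <stdio.h>
#include <limits.h>

#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

int main(void)
{
	unsigned int s_hi = 0, s_lo = UINT_MAX;

	ADD_64(s_hi, 0, s_lo, 1u);	/* 0x00000000ffffffff + 1 */
	printf("hi 0x%x lo 0x%x\n", s_hi, s_lo);	/* hi 0x1 lo 0x0 */
	return 0;
}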
bb2a0f7a 2969#define UPDATE_STAT64(s, t) \
a2fbb9ea 2970 do { \
bb2a0f7a
YG
2971 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2972 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2973 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2974 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2975 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2976 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2977 } while (0)
2978
bb2a0f7a 2979#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2980 do { \
bb2a0f7a
YG
2981 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2982 diff.lo, new->s##_lo, old->s##_lo); \
2983 ADD_64(estats->t##_hi, diff.hi, \
2984 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2985 } while (0)
2986
2987/* sum[hi:lo] += add */
2988#define ADD_EXTEND_64(s_hi, s_lo, a) \
2989 do { \
2990 s_lo += a; \
2991 s_hi += (s_lo < a) ? 1 : 0; \
2992 } while (0)
2993
bb2a0f7a 2994#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2995 do { \
bb2a0f7a
YG
2996 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2997 pstats->mac_stx[1].s##_lo, \
2998 new->s); \
a2fbb9ea
ET
2999 } while (0)
3000
bb2a0f7a 3001#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3002 do { \
4781bfad
EG
3003 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3004 old_tclient->s = tclient->s; \
de832a55
EG
3005 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3006 } while (0)
3007
3008#define UPDATE_EXTEND_USTAT(s, t) \
3009 do { \
3010 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3011 old_uclient->s = uclient->s; \
3012 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3013 } while (0)
3014
3015#define UPDATE_EXTEND_XSTAT(s, t) \
3016 do { \
4781bfad
EG
3017 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3018 old_xclient->s = xclient->s; \
de832a55
EG
3019 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3020 } while (0)
3021
3022/* minuend -= subtrahend */
3023#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3024 do { \
3025 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3026 } while (0)
3027
3028/* minuend[hi:lo] -= subtrahend */
3029#define SUB_EXTEND_64(m_hi, m_lo, s) \
3030 do { \
3031 SUB_64(m_hi, 0, m_lo, s); \
3032 } while (0)
3033
3034#define SUB_EXTEND_USTAT(s, t) \
3035 do { \
3036 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3037 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3038 } while (0)
3039
3040/*
3041 * General service functions
3042 */
3043
3044static inline long bnx2x_hilo(u32 *hiref)
3045{
3046 u32 lo = *(hiref + 1);
3047#if (BITS_PER_LONG == 64)
3048 u32 hi = *hiref;
3049
3050 return HILO_U64(hi, lo);
3051#else
3052 return lo;
3053#endif
3054}
3055
3056/*
3057 * Init service functions
3058 */
3059
bb2a0f7a
YG
3060static void bnx2x_storm_stats_post(struct bnx2x *bp)
3061{
3062 if (!bp->stats_pending) {
3063 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3064 int i, rc;
bb2a0f7a
YG
3065
3066 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3067 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3068 for_each_queue(bp, i)
3069 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3070
3071 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3072 ((u32 *)&ramrod_data)[1],
3073 ((u32 *)&ramrod_data)[0], 0);
3074 if (rc == 0) {
 3075			/* stats ramrod has its own slot on the spq */
3076 bp->spq_left++;
3077 bp->stats_pending = 1;
3078 }
3079 }
3080}
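/* Editor's note: a worked example of the ctr_id_vector built above --
 * one bit per client id whose statistics the ramrod should collect.
 * With three queues holding cl_id 0, 1 and 4, the loop yields
 * (1 << 0) | (1 << 1) | (1 << 4) = 0x13.
 */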
3081
3082static void bnx2x_stats_init(struct bnx2x *bp)
3083{
3084 int port = BP_PORT(bp);
de832a55 3085 int i;
bb2a0f7a 3086
de832a55 3087 bp->stats_pending = 0;
bb2a0f7a
YG
3088 bp->executer_idx = 0;
3089 bp->stats_counter = 0;
3090
3091 /* port stats */
3092 if (!BP_NOMCP(bp))
3093 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3094 else
3095 bp->port.port_stx = 0;
3096 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3097
3098 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3099 bp->port.old_nig_stats.brb_discard =
3100 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3101 bp->port.old_nig_stats.brb_truncate =
3102 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3103 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3104 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3105 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3106 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3107
3108 /* function stats */
de832a55
EG
3109 for_each_queue(bp, i) {
3110 struct bnx2x_fastpath *fp = &bp->fp[i];
3111
3112 memset(&fp->old_tclient, 0,
3113 sizeof(struct tstorm_per_client_stats));
3114 memset(&fp->old_uclient, 0,
3115 sizeof(struct ustorm_per_client_stats));
3116 memset(&fp->old_xclient, 0,
3117 sizeof(struct xstorm_per_client_stats));
3118 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3119 }
3120
bb2a0f7a 3121 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3122 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3123
3124 bp->stats_state = STATS_STATE_DISABLED;
3125 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3126 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3127}
3128
3129static void bnx2x_hw_stats_post(struct bnx2x *bp)
3130{
3131 struct dmae_command *dmae = &bp->stats_dmae;
3132 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3133
3134 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3135 if (CHIP_REV_IS_SLOW(bp))
3136 return;
bb2a0f7a
YG
3137
3138 /* loader */
3139 if (bp->executer_idx) {
3140 int loader_idx = PMF_DMAE_C(bp);
3141
3142 memset(dmae, 0, sizeof(struct dmae_command));
3143
3144 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3145 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3146 DMAE_CMD_DST_RESET |
3147#ifdef __BIG_ENDIAN
3148 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3149#else
3150 DMAE_CMD_ENDIANITY_DW_SWAP |
3151#endif
3152 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3153 DMAE_CMD_PORT_0) |
3154 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3155 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3156 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3157 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3158 sizeof(struct dmae_command) *
3159 (loader_idx + 1)) >> 2;
3160 dmae->dst_addr_hi = 0;
3161 dmae->len = sizeof(struct dmae_command) >> 2;
3162 if (CHIP_IS_E1(bp))
3163 dmae->len--;
3164 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3165 dmae->comp_addr_hi = 0;
3166 dmae->comp_val = 1;
3167
3168 *stats_comp = 0;
3169 bnx2x_post_dmae(bp, dmae, loader_idx);
3170
3171 } else if (bp->func_stx) {
3172 *stats_comp = 0;
3173 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3174 }
3175}
3176
3177static int bnx2x_stats_comp(struct bnx2x *bp)
3178{
3179 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180 int cnt = 10;
3181
3182 might_sleep();
3183 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3184 if (!cnt) {
 3185			BNX2X_ERR("timeout waiting for stats to finish\n");
3186 break;
3187 }
3188 cnt--;
12469401 3189 msleep(1);
bb2a0f7a
YG
3190 }
3191 return 1;
3192}
3193
3194/*
3195 * Statistics service functions
3196 */
3197
3198static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3199{
3200 struct dmae_command *dmae;
3201 u32 opcode;
3202 int loader_idx = PMF_DMAE_C(bp);
3203 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3204
3205 /* sanity */
3206 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3207 BNX2X_ERR("BUG!\n");
3208 return;
3209 }
3210
3211 bp->executer_idx = 0;
3212
3213 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3214 DMAE_CMD_C_ENABLE |
3215 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3216#ifdef __BIG_ENDIAN
3217 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3218#else
3219 DMAE_CMD_ENDIANITY_DW_SWAP |
3220#endif
3221 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3222 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3223
3224 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3225 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3226 dmae->src_addr_lo = bp->port.port_stx >> 2;
3227 dmae->src_addr_hi = 0;
3228 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3229 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3230 dmae->len = DMAE_LEN32_RD_MAX;
3231 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3232 dmae->comp_addr_hi = 0;
3233 dmae->comp_val = 1;
3234
3235 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3236 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3237 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3238 dmae->src_addr_hi = 0;
7a9b2557
VZ
3239 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3240 DMAE_LEN32_RD_MAX * 4);
3241 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3242 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3243 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3244 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3245 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3246 dmae->comp_val = DMAE_COMP_VAL;
3247
3248 *stats_comp = 0;
3249 bnx2x_hw_stats_post(bp);
3250 bnx2x_stats_comp(bp);
3251}
3252
3253static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3254{
3255 struct dmae_command *dmae;
34f80b04 3256 int port = BP_PORT(bp);
bb2a0f7a 3257 int vn = BP_E1HVN(bp);
a2fbb9ea 3258 u32 opcode;
bb2a0f7a 3259 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3260 u32 mac_addr;
bb2a0f7a
YG
3261 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3262
3263 /* sanity */
3264 if (!bp->link_vars.link_up || !bp->port.pmf) {
3265 BNX2X_ERR("BUG!\n");
3266 return;
3267 }
a2fbb9ea
ET
3268
3269 bp->executer_idx = 0;
bb2a0f7a
YG
3270
3271 /* MCP */
3272 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3273 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3274 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3275#ifdef __BIG_ENDIAN
bb2a0f7a 3276 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3277#else
bb2a0f7a 3278 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3279#endif
bb2a0f7a
YG
3280 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3281 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3282
bb2a0f7a 3283 if (bp->port.port_stx) {
a2fbb9ea
ET
3284
3285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286 dmae->opcode = opcode;
bb2a0f7a
YG
3287 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3288 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3289 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3290 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3291 dmae->len = sizeof(struct host_port_stats) >> 2;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3294 dmae->comp_val = 1;
a2fbb9ea
ET
3295 }
3296
bb2a0f7a
YG
3297 if (bp->func_stx) {
3298
3299 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3300 dmae->opcode = opcode;
3301 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3302 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3303 dmae->dst_addr_lo = bp->func_stx >> 2;
3304 dmae->dst_addr_hi = 0;
3305 dmae->len = sizeof(struct host_func_stats) >> 2;
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3308 dmae->comp_val = 1;
a2fbb9ea
ET
3309 }
3310
bb2a0f7a 3311 /* MAC */
a2fbb9ea
ET
3312 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3313 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3314 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3315#ifdef __BIG_ENDIAN
3316 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3317#else
3318 DMAE_CMD_ENDIANITY_DW_SWAP |
3319#endif
bb2a0f7a
YG
3320 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3321 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3322
c18487ee 3323 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3324
3325 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3326 NIG_REG_INGRESS_BMAC0_MEM);
3327
3328 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3329 BIGMAC_REGISTER_TX_STAT_GTBYT */
3330 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331 dmae->opcode = opcode;
3332 dmae->src_addr_lo = (mac_addr +
3333 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3334 dmae->src_addr_hi = 0;
3335 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3336 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3337 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3338 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3339 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3340 dmae->comp_addr_hi = 0;
3341 dmae->comp_val = 1;
3342
3343 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3344 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3345 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346 dmae->opcode = opcode;
3347 dmae->src_addr_lo = (mac_addr +
3348 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3349 dmae->src_addr_hi = 0;
3350 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3351 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3352 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3353 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3354 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3355 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3356 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3357 dmae->comp_addr_hi = 0;
3358 dmae->comp_val = 1;
3359
c18487ee 3360 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3361
3362 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3363
3364 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = opcode;
3367 dmae->src_addr_lo = (mac_addr +
3368 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3369 dmae->src_addr_hi = 0;
3370 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3372 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3373 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374 dmae->comp_addr_hi = 0;
3375 dmae->comp_val = 1;
3376
3377 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3378 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3379 dmae->opcode = opcode;
3380 dmae->src_addr_lo = (mac_addr +
3381 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3382 dmae->src_addr_hi = 0;
3383 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3384 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3385 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3386 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3387 dmae->len = 1;
3388 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3389 dmae->comp_addr_hi = 0;
3390 dmae->comp_val = 1;
3391
3392 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3394 dmae->opcode = opcode;
3395 dmae->src_addr_lo = (mac_addr +
3396 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3397 dmae->src_addr_hi = 0;
3398 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3399 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3400 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3401 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3402 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3404 dmae->comp_addr_hi = 0;
3405 dmae->comp_val = 1;
3406 }
3407
3408 /* NIG */
bb2a0f7a
YG
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = opcode;
3411 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3412 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3413 dmae->src_addr_hi = 0;
3414 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3415 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3416 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3417 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418 dmae->comp_addr_hi = 0;
3419 dmae->comp_val = 1;
3420
3421 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3422 dmae->opcode = opcode;
3423 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3424 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3425 dmae->src_addr_hi = 0;
3426 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3427 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3428 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3429 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3430 dmae->len = (2*sizeof(u32)) >> 2;
3431 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3432 dmae->comp_addr_hi = 0;
3433 dmae->comp_val = 1;
3434
3435 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3436 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3437 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3438 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3439#ifdef __BIG_ENDIAN
3440 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3441#else
3442 DMAE_CMD_ENDIANITY_DW_SWAP |
3443#endif
3444 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3445 (vn << DMAE_CMD_E1HVN_SHIFT));
3446 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3447 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3448 	dmae->src_addr_hi = 0;
3449 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3450 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3451 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3452 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3453 dmae->len = (2*sizeof(u32)) >> 2;
3454 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3455 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3456 dmae->comp_val = DMAE_COMP_VAL;
3457
3458 *stats_comp = 0;
3459}
3460
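/* DMAE completion convention used throughout the statistics code:
 * every intermediate command in a chain completes by writing 1 into
 * the dmae_reg_go_c[loader_idx] GRC register, which kicks off the
 * next command, while the final command writes DMAE_COMP_VAL into
 * the stats_comp word in host memory so bnx2x_stats_comp() can poll
 * for end-of-chain.  *stats_comp is therefore cleared before posting.
 */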
3461 static void bnx2x_func_stats_init(struct bnx2x *bp)
3462 {
3463 struct dmae_command *dmae = &bp->stats_dmae;
3464 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3465 
3466 /* sanity */
3467 if (!bp->func_stx) {
3468 BNX2X_ERR("BUG!\n");
3469 return;
3470 }
3471 
3472 bp->executer_idx = 0;
3473 memset(dmae, 0, sizeof(struct dmae_command));
3474 
3475 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3476 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3477 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3478#ifdef __BIG_ENDIAN
3479 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3480#else
3481 DMAE_CMD_ENDIANITY_DW_SWAP |
3482#endif
3483 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3484 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3485 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3486 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3487 dmae->dst_addr_lo = bp->func_stx >> 2;
3488 dmae->dst_addr_hi = 0;
3489 dmae->len = sizeof(struct host_func_stats) >> 2;
3490 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3491 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3492 dmae->comp_val = DMAE_COMP_VAL;
3493 
3494 *stats_comp = 0;
3495}
3496 
3497static void bnx2x_stats_start(struct bnx2x *bp)
3498{
3499 if (bp->port.pmf)
3500 bnx2x_port_stats_init(bp);
3501
3502 else if (bp->func_stx)
3503 bnx2x_func_stats_init(bp);
3504
3505 bnx2x_hw_stats_post(bp);
3506 bnx2x_storm_stats_post(bp);
3507}
3508
3509static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3510{
3511 bnx2x_stats_comp(bp);
3512 bnx2x_stats_pmf_update(bp);
3513 bnx2x_stats_start(bp);
3514}
3515
3516static void bnx2x_stats_restart(struct bnx2x *bp)
3517{
3518 bnx2x_stats_comp(bp);
3519 bnx2x_stats_start(bp);
3520}
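/* The UPDATE_STAT64*()/UPDATE_EXTEND_STAT() helpers used below
 * (defined earlier in this file) fold the freshly DMAE'd MAC counters
 * into the mac_stx[] area of host_port_stats as 64-bit hi/lo pairs,
 * extending narrower hardware counters as they wrap.
 */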
3521
3522static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3523{
3524 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3525 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3526 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3527 struct {
3528 u32 lo;
3529 u32 hi;
3530 } diff;
3531
3532 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3533 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3534 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3535 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3536 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3537 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3538 	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3539 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3540 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3541 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3542 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3543 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3544 UPDATE_STAT64(tx_stat_gt127,
3545 tx_stat_etherstatspkts65octetsto127octets);
3546 UPDATE_STAT64(tx_stat_gt255,
3547 tx_stat_etherstatspkts128octetsto255octets);
3548 UPDATE_STAT64(tx_stat_gt511,
3549 tx_stat_etherstatspkts256octetsto511octets);
3550 UPDATE_STAT64(tx_stat_gt1023,
3551 tx_stat_etherstatspkts512octetsto1023octets);
3552 UPDATE_STAT64(tx_stat_gt1518,
3553 tx_stat_etherstatspkts1024octetsto1522octets);
3554 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3555 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3556 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3557 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3558 UPDATE_STAT64(tx_stat_gterr,
3559 tx_stat_dot3statsinternalmactransmiterrors);
3560 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3561
3562 estats->pause_frames_received_hi =
3563 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3564 estats->pause_frames_received_lo =
3565 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3566
3567 estats->pause_frames_sent_hi =
3568 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3569 estats->pause_frames_sent_lo =
3570 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3571}
3572
3573static void bnx2x_emac_stats_update(struct bnx2x *bp)
3574{
3575 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3576 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3577 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3578
3579 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3580 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3581 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3582 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3583 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3584 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3585 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3586 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3587 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3588 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3589 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3590 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3591 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3592 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3593 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3594 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3595 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3596 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3597 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3598 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3599 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3600 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3601 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3602 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3603 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3604 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3605 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3606 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3607 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3608 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3609 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3610
3611 estats->pause_frames_received_hi =
3612 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3613 estats->pause_frames_received_lo =
3614 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3615 ADD_64(estats->pause_frames_received_hi,
3616 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3617 estats->pause_frames_received_lo,
3618 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3619
3620 estats->pause_frames_sent_hi =
3621 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3622 estats->pause_frames_sent_lo =
3623 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3624 ADD_64(estats->pause_frames_sent_hi,
3625 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3626 estats->pause_frames_sent_lo,
3627 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3628}
3629
3630static int bnx2x_hw_stats_update(struct bnx2x *bp)
3631{
3632 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3633 struct nig_stats *old = &(bp->port.old_nig_stats);
3634 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3635 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3636 struct {
3637 u32 lo;
3638 u32 hi;
3639 } diff;
3640 	u32 nig_timer_max;
3641
3642 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3643 bnx2x_bmac_stats_update(bp);
3644
3645 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3646 bnx2x_emac_stats_update(bp);
3647
3648 else { /* unreached */
3649 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3650 return -1;
3651 }
3652 
3653 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3654 new->brb_discard - old->brb_discard);
3655 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3656 new->brb_truncate - old->brb_truncate);
3657 
3658 UPDATE_STAT64_NIG(egress_mac_pkt0,
3659 etherstatspkts1024octetsto1522octets);
3660 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3661 
3662 	memcpy(old, new, sizeof(struct nig_stats));
3663 
3664 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3665 sizeof(struct mac_stx));
3666 estats->brb_drop_hi = pstats->brb_drop_hi;
3667 estats->brb_drop_lo = pstats->brb_drop_lo;
3668 
3669 	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3670 
3671 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3672 if (nig_timer_max != estats->nig_timer_max) {
3673 estats->nig_timer_max = nig_timer_max;
3674 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3675 }
3676
3677 	return 0;
3678}
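/* Storm statistics are copied by the firmware into the fw_stats
 * buffer, and each storm stamps a per-client stats_counter.  A
 * snapshot is trusted only when counter + 1 equals the driver's
 * bp->stats_counter; otherwise the update below bails out with a
 * per-storm error code (-1/-2/-4) and is retried on the next tick.
 */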
3679
3680 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3681{
3682 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3683 	struct tstorm_per_port_stats *tport =
3684 				&stats->tstorm_common.port_statistics;
3685 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3686 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3687 int i;
3688
3689 memset(&(fstats->total_bytes_received_hi), 0,
3690 sizeof(struct host_func_stats) - 2*sizeof(u32));
3691 estats->error_bytes_received_hi = 0;
3692 estats->error_bytes_received_lo = 0;
3693 estats->etherstatsoverrsizepkts_hi = 0;
3694 estats->etherstatsoverrsizepkts_lo = 0;
3695 estats->no_buff_discard_hi = 0;
3696 estats->no_buff_discard_lo = 0;
3697 
3698 for_each_queue(bp, i) {
3699 struct bnx2x_fastpath *fp = &bp->fp[i];
3700 int cl_id = fp->cl_id;
3701 struct tstorm_per_client_stats *tclient =
3702 &stats->tstorm_common.client_statistics[cl_id];
3703 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3704 struct ustorm_per_client_stats *uclient =
3705 &stats->ustorm_common.client_statistics[cl_id];
3706 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3707 struct xstorm_per_client_stats *xclient =
3708 &stats->xstorm_common.client_statistics[cl_id];
3709 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3710 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3711 u32 diff;
3712
3713 /* are storm stats valid? */
3714 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3715 							bp->stats_counter) {
3716 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3717 " xstorm counter (%d) != stats_counter (%d)\n",
3718 i, xclient->stats_counter, bp->stats_counter);
3719 return -1;
3720 }
3721 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3722 							bp->stats_counter) {
3723 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3724 " tstorm counter (%d) != stats_counter (%d)\n",
3725 i, tclient->stats_counter, bp->stats_counter);
3726 return -2;
3727 }
3728 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3729 bp->stats_counter) {
3730 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3731 " ustorm counter (%d) != stats_counter (%d)\n",
3732 i, uclient->stats_counter, bp->stats_counter);
3733 return -4;
3734 }
3735 
3736 qstats->total_bytes_received_hi =
3737 qstats->valid_bytes_received_hi =
3738 				le32_to_cpu(tclient->total_rcv_bytes.hi);
3739 qstats->total_bytes_received_lo =
3740 qstats->valid_bytes_received_lo =
3741 				le32_to_cpu(tclient->total_rcv_bytes.lo);
3742 
3743 		qstats->error_bytes_received_hi =
3744 				le32_to_cpu(tclient->rcv_error_bytes.hi);
3745 		qstats->error_bytes_received_lo =
3746 				le32_to_cpu(tclient->rcv_error_bytes.lo);
3747 
3748 ADD_64(qstats->total_bytes_received_hi,
3749 qstats->error_bytes_received_hi,
3750 qstats->total_bytes_received_lo,
3751 qstats->error_bytes_received_lo);
3752
3753 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3754 total_unicast_packets_received);
3755 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3756 total_multicast_packets_received);
3757 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3758 total_broadcast_packets_received);
3759 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3760 etherstatsoverrsizepkts);
3761 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3762
3763 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3764 total_unicast_packets_received);
3765 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3766 total_multicast_packets_received);
3767 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3768 total_broadcast_packets_received);
3769 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3770 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3771 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3772
3773 qstats->total_bytes_transmitted_hi =
3774 				le32_to_cpu(xclient->total_sent_bytes.hi);
3775 		qstats->total_bytes_transmitted_lo =
3776 le32_to_cpu(xclient->total_sent_bytes.lo);
3777
3778 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3779 total_unicast_packets_transmitted);
3780 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3781 total_multicast_packets_transmitted);
3782 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3783 total_broadcast_packets_transmitted);
3784
3785 old_tclient->checksum_discard = tclient->checksum_discard;
3786 old_tclient->ttl0_discard = tclient->ttl0_discard;
3787
3788 ADD_64(fstats->total_bytes_received_hi,
3789 qstats->total_bytes_received_hi,
3790 fstats->total_bytes_received_lo,
3791 qstats->total_bytes_received_lo);
3792 ADD_64(fstats->total_bytes_transmitted_hi,
3793 qstats->total_bytes_transmitted_hi,
3794 fstats->total_bytes_transmitted_lo,
3795 qstats->total_bytes_transmitted_lo);
3796 ADD_64(fstats->total_unicast_packets_received_hi,
3797 qstats->total_unicast_packets_received_hi,
3798 fstats->total_unicast_packets_received_lo,
3799 qstats->total_unicast_packets_received_lo);
3800 ADD_64(fstats->total_multicast_packets_received_hi,
3801 qstats->total_multicast_packets_received_hi,
3802 fstats->total_multicast_packets_received_lo,
3803 qstats->total_multicast_packets_received_lo);
3804 ADD_64(fstats->total_broadcast_packets_received_hi,
3805 qstats->total_broadcast_packets_received_hi,
3806 fstats->total_broadcast_packets_received_lo,
3807 qstats->total_broadcast_packets_received_lo);
3808 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3809 qstats->total_unicast_packets_transmitted_hi,
3810 fstats->total_unicast_packets_transmitted_lo,
3811 qstats->total_unicast_packets_transmitted_lo);
3812 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3813 qstats->total_multicast_packets_transmitted_hi,
3814 fstats->total_multicast_packets_transmitted_lo,
3815 qstats->total_multicast_packets_transmitted_lo);
3816 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3817 qstats->total_broadcast_packets_transmitted_hi,
3818 fstats->total_broadcast_packets_transmitted_lo,
3819 qstats->total_broadcast_packets_transmitted_lo);
3820 ADD_64(fstats->valid_bytes_received_hi,
3821 qstats->valid_bytes_received_hi,
3822 fstats->valid_bytes_received_lo,
3823 qstats->valid_bytes_received_lo);
3824
3825 ADD_64(estats->error_bytes_received_hi,
3826 qstats->error_bytes_received_hi,
3827 estats->error_bytes_received_lo,
3828 qstats->error_bytes_received_lo);
3829 ADD_64(estats->etherstatsoverrsizepkts_hi,
3830 qstats->etherstatsoverrsizepkts_hi,
3831 estats->etherstatsoverrsizepkts_lo,
3832 qstats->etherstatsoverrsizepkts_lo);
3833 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3834 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3835 }
3836
3837 ADD_64(fstats->total_bytes_received_hi,
3838 estats->rx_stat_ifhcinbadoctets_hi,
3839 fstats->total_bytes_received_lo,
3840 estats->rx_stat_ifhcinbadoctets_lo);
3841
3842 memcpy(estats, &(fstats->total_bytes_received_hi),
3843 sizeof(struct host_func_stats) - 2*sizeof(u32));
3844
3845 ADD_64(estats->etherstatsoverrsizepkts_hi,
3846 estats->rx_stat_dot3statsframestoolong_hi,
3847 estats->etherstatsoverrsizepkts_lo,
3848 estats->rx_stat_dot3statsframestoolong_lo);
3849 ADD_64(estats->error_bytes_received_hi,
3850 estats->rx_stat_ifhcinbadoctets_hi,
3851 estats->error_bytes_received_lo,
3852 estats->rx_stat_ifhcinbadoctets_lo);
3853
3854 if (bp->port.pmf) {
3855 estats->mac_filter_discard =
3856 le32_to_cpu(tport->mac_filter_discard);
3857 estats->xxoverflow_discard =
3858 le32_to_cpu(tport->xxoverflow_discard);
3859 estats->brb_truncate_discard =
3860 				le32_to_cpu(tport->brb_truncate_discard);
3861 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3862 }
3863
3864 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3865 
3866 bp->stats_pending = 0;
3867
3868 return 0;
3869}
3870
3871 static void bnx2x_net_stats_update(struct bnx2x *bp)
3872 {
3873 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
3874 	struct net_device_stats *nstats = &bp->dev->stats;
3875 	int i;
3876
3877 nstats->rx_packets =
3878 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3879 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3880 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3881
3882 nstats->tx_packets =
3883 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3884 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3885 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3886
3887 	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3888 
3889 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3890 
3891 nstats->rx_dropped = estats->mac_discard;
3892 for_each_queue(bp, i)
3893 nstats->rx_dropped +=
3894 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3895
3896 nstats->tx_dropped = 0;
3897
3898 nstats->multicast =
3899 		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3900 
3901 	nstats->collisions =
3902 		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3903
3904 nstats->rx_length_errors =
3905 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3906 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3907 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3908 bnx2x_hilo(&estats->brb_truncate_hi);
3909 nstats->rx_crc_errors =
3910 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3911 nstats->rx_frame_errors =
3912 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3913 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3914 nstats->rx_missed_errors = estats->xxoverflow_discard;
3915
3916 nstats->rx_errors = nstats->rx_length_errors +
3917 nstats->rx_over_errors +
3918 nstats->rx_crc_errors +
3919 nstats->rx_frame_errors +
3920 nstats->rx_fifo_errors +
3921 nstats->rx_missed_errors;
3922 
3923 	nstats->tx_aborted_errors =
3924 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3925 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3926 nstats->tx_carrier_errors =
3927 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3928 nstats->tx_fifo_errors = 0;
3929 nstats->tx_heartbeat_errors = 0;
3930 nstats->tx_window_errors = 0;
3931
3932 nstats->tx_errors = nstats->tx_aborted_errors +
3933 nstats->tx_carrier_errors +
3934 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3935}
3936
3937static void bnx2x_drv_stats_update(struct bnx2x *bp)
3938{
3939 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3940 int i;
3941
3942 estats->driver_xoff = 0;
3943 estats->rx_err_discard_pkt = 0;
3944 estats->rx_skb_alloc_failed = 0;
3945 estats->hw_csum_err = 0;
3946 for_each_queue(bp, i) {
3947 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3948
3949 estats->driver_xoff += qstats->driver_xoff;
3950 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3951 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3952 estats->hw_csum_err += qstats->hw_csum_err;
3953 }
3954}
3955
3956 static void bnx2x_stats_update(struct bnx2x *bp)
3957 {
3958 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3959 
3960 if (*stats_comp != DMAE_COMP_VAL)
3961 return;
3962
3963 if (bp->port.pmf)
3964 		bnx2x_hw_stats_update(bp);
3965 
3966 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3967 BNX2X_ERR("storm stats were not updated for 3 times\n");
3968 bnx2x_panic();
3969 return;
3970 }
3971
3972 bnx2x_net_stats_update(bp);
3973 bnx2x_drv_stats_update(bp);
3974
3975 	if (bp->msglevel & NETIF_MSG_TIMER) {
3976 struct tstorm_per_client_stats *old_tclient =
3977 &bp->fp->old_tclient;
3978 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3979 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
3980 		struct net_device_stats *nstats = &bp->dev->stats;
3981 		int i;
3982
3983 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3984 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3985 " tx pkt (%lx)\n",
3986 bnx2x_tx_avail(bp->fp),
3987 		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3988 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3989 " rx pkt (%lx)\n",
3990 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3991 bp->fp->rx_comp_cons),
3992 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3993 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3994 "brb truncate %u\n",
3995 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3996 qstats->driver_xoff,
3997 estats->brb_drop_lo, estats->brb_truncate_lo);
3998 		printk(KERN_DEBUG "tstats: checksum_discard %u "
3999 			"packets_too_big_discard %lu no_buff_discard %lu "
4000 "mac_discard %u mac_filter_discard %u "
4001 "xxovrflow_discard %u brb_truncate_discard %u "
4002 "ttl0_discard %u\n",
4003 			le32_to_cpu(old_tclient->checksum_discard),
4004 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4005 bnx2x_hilo(&qstats->no_buff_discard_hi),
4006 estats->mac_discard, estats->mac_filter_discard,
4007 estats->xxoverflow_discard, estats->brb_truncate_discard,
4008 			le32_to_cpu(old_tclient->ttl0_discard));
4009
4010 for_each_queue(bp, i) {
4011 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4012 bnx2x_fp(bp, i, tx_pkt),
4013 bnx2x_fp(bp, i, rx_pkt),
4014 bnx2x_fp(bp, i, rx_calls));
4015 }
4016 }
4017
4018 bnx2x_hw_stats_post(bp);
4019 bnx2x_storm_stats_post(bp);
4020}
4021 
4022static void bnx2x_port_stats_stop(struct bnx2x *bp)
4023{
4024 struct dmae_command *dmae;
4025 u32 opcode;
4026 int loader_idx = PMF_DMAE_C(bp);
4027 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4028 
4029 	bp->executer_idx = 0;
4030 
4031 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4032 DMAE_CMD_C_ENABLE |
4033 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4034 #ifdef __BIG_ENDIAN
4035 		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
4036 #else
4037 		  DMAE_CMD_ENDIANITY_DW_SWAP |
4038 #endif
4039 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4040 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4041
4042 if (bp->port.port_stx) {
4043
4044 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045 if (bp->func_stx)
4046 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4047 else
4048 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4049 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4050 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4051 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4052 		dmae->dst_addr_hi = 0;
4053 dmae->len = sizeof(struct host_port_stats) >> 2;
4054 if (bp->func_stx) {
4055 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4056 dmae->comp_addr_hi = 0;
4057 dmae->comp_val = 1;
4058 } else {
4059 dmae->comp_addr_lo =
4060 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4061 dmae->comp_addr_hi =
4062 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4063 dmae->comp_val = DMAE_COMP_VAL;
4064 
4065 *stats_comp = 0;
4066 }
4067 }
4068
4069 if (bp->func_stx) {
4070
4071 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4072 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4073 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4074 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4075 dmae->dst_addr_lo = bp->func_stx >> 2;
4076 dmae->dst_addr_hi = 0;
4077 dmae->len = sizeof(struct host_func_stats) >> 2;
4078 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4079 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4080 dmae->comp_val = DMAE_COMP_VAL;
4081
4082 *stats_comp = 0;
4083 	}
4084}
4085
4086static void bnx2x_stats_stop(struct bnx2x *bp)
4087{
4088 int update = 0;
4089
4090 bnx2x_stats_comp(bp);
4091
4092 if (bp->port.pmf)
4093 update = (bnx2x_hw_stats_update(bp) == 0);
4094
4095 update |= (bnx2x_storm_stats_update(bp) == 0);
4096
4097 if (update) {
4098 bnx2x_net_stats_update(bp);
4099 
4100 if (bp->port.pmf)
4101 bnx2x_port_stats_stop(bp);
4102
4103 bnx2x_hw_stats_post(bp);
4104 bnx2x_stats_comp(bp);
4105 }
4106}
4107
4108static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4109{
4110}
4111
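/* Statistics state machine: two states (DISABLED, ENABLED) by four
 * events (PMF, LINK_UP, UPDATE, STOP).  Each cell of the table names
 * the action to run and the next state; bnx2x_stats_handle() performs
 * the lookup and the transition.
 */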
4112static const struct {
4113 void (*action)(struct bnx2x *bp);
4114 enum bnx2x_stats_state next_state;
4115} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4116/* state event */
4117{
4118/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4119/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4120/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4121/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4122},
4123{
4124/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4125/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4126/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4127/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4128}
4129};
4130
4131static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4132{
4133 enum bnx2x_stats_state state = bp->stats_state;
4134
4135 bnx2x_stats_stm[state][event].action(bp);
4136 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4137
4138 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4139 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4140 state, event, bp->stats_state);
4141}
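/* A typical feed into the state machine is the periodic timer below,
 * which raises STATS_EVENT_UPDATE while the device is open:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */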
4142
4143static void bnx2x_timer(unsigned long data)
4144{
4145 struct bnx2x *bp = (struct bnx2x *) data;
4146
4147 if (!netif_running(bp->dev))
4148 return;
4149
4150 if (atomic_read(&bp->intr_sem) != 0)
4151 		goto timer_restart;
4152
4153 if (poll) {
4154 struct bnx2x_fastpath *fp = &bp->fp[0];
4155 int rc;
4156
4157 bnx2x_tx_int(fp, 1000);
4158 rc = bnx2x_rx_int(fp, 1000);
4159 }
4160
4161 if (!BP_NOMCP(bp)) {
4162 int func = BP_FUNC(bp);
4163 u32 drv_pulse;
4164 u32 mcp_pulse;
4165
4166 ++bp->fw_drv_pulse_wr_seq;
4167 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4168 /* TBD - add SYSTEM_TIME */
4169 drv_pulse = bp->fw_drv_pulse_wr_seq;
4170 		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4171 
4172 		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4173 MCP_PULSE_SEQ_MASK);
4174 /* The delta between driver pulse and mcp response
4175 * should be 1 (before mcp response) or 0 (after mcp response)
4176 */
4177 if ((drv_pulse != mcp_pulse) &&
4178 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4179 /* someone lost a heartbeat... */
4180 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4181 drv_pulse, mcp_pulse);
4182 }
4183 }
4184
4185 if ((bp->state == BNX2X_STATE_OPEN) ||
4186 (bp->state == BNX2X_STATE_DISABLED))
4187 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4188 
4189 timer_restart:
4190 mod_timer(&bp->timer, jiffies + bp->current_interval);
4191}
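/* The driver/MCP heartbeat above is a sequence handshake: the driver
 * advances drv_pulse_mb once per timer tick and the management CPU
 * echoes it in mcp_pulse_mb, so a healthy pair differs by at most one
 * modulo MCP_PULSE_SEQ_MASK; anything else is logged as a lost
 * heartbeat.
 */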
4192
4193/* end of Statistics */
4194
4195/* nic init */
4196
4197/*
4198 * nic init service functions
4199 */
4200
4201 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4202 {
4203 int port = BP_PORT(bp);
4204
4205 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4206 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4207 		       sizeof(struct ustorm_status_block)/4);
4208 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4209 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4210 		       sizeof(struct cstorm_status_block)/4);
4211}
4212
4213static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4214 dma_addr_t mapping, int sb_id)
4215{
4216 int port = BP_PORT(bp);
4217 	int func = BP_FUNC(bp);
4218 	int index;
4219 	u64 section;
4220
4221 /* USTORM */
4222 section = ((u64)mapping) + offsetof(struct host_status_block,
4223 u_status_block);
4224 	sb->u_status_block.status_block_id = sb_id;
4225
4226 REG_WR(bp, BAR_USTRORM_INTMEM +
4227 	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4228 	REG_WR(bp, BAR_USTRORM_INTMEM +
4229 	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4230 	       U64_HI(section));
4231 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4232 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4233
4234 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4235 REG_WR16(bp, BAR_USTRORM_INTMEM +
4236 			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4237
4238 /* CSTORM */
4239 section = ((u64)mapping) + offsetof(struct host_status_block,
4240 c_status_block);
4241 	sb->c_status_block.status_block_id = sb_id;
4242
4243 REG_WR(bp, BAR_CSTRORM_INTMEM +
4244 	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4245 	REG_WR(bp, BAR_CSTRORM_INTMEM +
4246 	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4247 	       U64_HI(section));
4248 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4249 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4250
4251 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4252 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4253 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4254
4255 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4256}
4257
4258static void bnx2x_zero_def_sb(struct bnx2x *bp)
4259{
4260 int func = BP_FUNC(bp);
4261 
4262 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4263 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4264 sizeof(struct ustorm_def_status_block)/4);
4265 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4266 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4267 sizeof(struct cstorm_def_status_block)/4);
4268 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4269 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4270 sizeof(struct xstorm_def_status_block)/4);
4271 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4272 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4273 sizeof(struct tstorm_def_status_block)/4);
4274}
4275
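/* The default status block below carries the attention block plus one
 * section per storm (U/C/T/X).  For each section the host DMA address
 * is programmed into the storm's INTMEM area, and every host-coalescing
 * index starts out disabled (its HC_DISABLE word written as 1).
 */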
4276static void bnx2x_init_def_sb(struct bnx2x *bp,
4277 struct host_def_status_block *def_sb,
4278 			      dma_addr_t mapping, int sb_id)
4279 {
4280 int port = BP_PORT(bp);
4281 int func = BP_FUNC(bp);
4282 int index, val, reg_offset;
4283 u64 section;
4284
4285 /* ATTN */
4286 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4287 atten_status_block);
4288 	def_sb->atten_status_block.status_block_id = sb_id;
4289 
4290 bp->attn_state = 0;
4291
4292 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4293 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4294
4295 	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4296 bp->attn_group[index].sig[0] = REG_RD(bp,
4297 reg_offset + 0x10*index);
4298 bp->attn_group[index].sig[1] = REG_RD(bp,
4299 reg_offset + 0x4 + 0x10*index);
4300 bp->attn_group[index].sig[2] = REG_RD(bp,
4301 reg_offset + 0x8 + 0x10*index);
4302 bp->attn_group[index].sig[3] = REG_RD(bp,
4303 reg_offset + 0xc + 0x10*index);
4304 }
4305
4306 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4307 HC_REG_ATTN_MSG0_ADDR_L);
4308
4309 REG_WR(bp, reg_offset, U64_LO(section));
4310 REG_WR(bp, reg_offset + 4, U64_HI(section));
4311
4312 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4313
4314 val = REG_RD(bp, reg_offset);
4315 	val |= sb_id;
4316 REG_WR(bp, reg_offset, val);
4317
4318 /* USTORM */
4319 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4320 u_def_status_block);
4321 	def_sb->u_def_status_block.status_block_id = sb_id;
4322
4323 REG_WR(bp, BAR_USTRORM_INTMEM +
4324 	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4325 	REG_WR(bp, BAR_USTRORM_INTMEM +
4326 	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4327 	       U64_HI(section));
4328 	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4329 		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4330
4331 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4332 REG_WR16(bp, BAR_USTRORM_INTMEM +
4333 			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4334
4335 /* CSTORM */
4336 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4337 c_def_status_block);
4338 	def_sb->c_def_status_block.status_block_id = sb_id;
4339
4340 REG_WR(bp, BAR_CSTRORM_INTMEM +
4341 	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4342 	REG_WR(bp, BAR_CSTRORM_INTMEM +
4343 	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4344 	       U64_HI(section));
4345 	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4346 		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4347
4348 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4349 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4350 			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4351
4352 /* TSTORM */
4353 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4354 t_def_status_block);
4355 	def_sb->t_def_status_block.status_block_id = sb_id;
4356
4357 REG_WR(bp, BAR_TSTRORM_INTMEM +
4358 	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4359 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4360 	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4361 	       U64_HI(section));
4362 	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4363 		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4364
4365 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4366 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4367 			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4368
4369 /* XSTORM */
4370 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4371 x_def_status_block);
4372 	def_sb->x_def_status_block.status_block_id = sb_id;
4373
4374 REG_WR(bp, BAR_XSTRORM_INTMEM +
4375 	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4376 	REG_WR(bp, BAR_XSTRORM_INTMEM +
4377 	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4378 	       U64_HI(section));
4379 	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4380 		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4381
4382 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4383 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4384 			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4385 
4386 	bp->stats_pending = 0;
4387 	bp->set_mac_pending = 0;
4388 
4389 	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4390}
4391
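/* Interrupt coalescing: bp->rx_ticks/bp->tx_ticks are kept in
 * microseconds and scaled by 12 for the HC timeout registers (which
 * evidently count in 12-usec units); a tick value of zero disables
 * coalescing for that index via its HC_DISABLE word instead.
 */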
4392static void bnx2x_update_coalesce(struct bnx2x *bp)
4393{
4394 	int port = BP_PORT(bp);
4395 int i;
4396
4397 for_each_queue(bp, i) {
4398 		int sb_id = bp->fp[i].sb_id;
4399
4400 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4401 REG_WR8(bp, BAR_USTRORM_INTMEM +
4402 			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4403 						    U_SB_ETH_RX_CQ_INDEX),
4404 			bp->rx_ticks/12);
4405 		REG_WR16(bp, BAR_USTRORM_INTMEM +
4406 			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4407 U_SB_ETH_RX_CQ_INDEX),
4408 bp->rx_ticks ? 0 : 1);
4409
4410 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4411 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4412 			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4413 						    C_SB_ETH_TX_CQ_INDEX),
4414 			bp->tx_ticks/12);
4415 		REG_WR16(bp, BAR_CSTRORM_INTMEM +
4416 			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4417 						     C_SB_ETH_TX_CQ_INDEX),
4418 			 bp->tx_ticks ? 0 : 1);
4419 }
4420}
4421
4422static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4423 struct bnx2x_fastpath *fp, int last)
4424{
4425 int i;
4426
4427 for (i = 0; i < last; i++) {
4428 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4429 struct sk_buff *skb = rx_buf->skb;
4430
4431 if (skb == NULL) {
4432 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4433 continue;
4434 }
4435
4436 if (fp->tpa_state[i] == BNX2X_TPA_START)
4437 pci_unmap_single(bp->pdev,
4438 pci_unmap_addr(rx_buf, mapping),
4439 					 bp->rx_buf_size,
4440 PCI_DMA_FROMDEVICE);
4441
4442 dev_kfree_skb(skb);
4443 rx_buf->skb = NULL;
4444 }
4445}
4446
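/* Rx ring topology set up below, per rx queue: a BD ring for receive
 * buffers, an SGE ring used by TPA aggregation, and a receive
 * completion queue (RCQ).  The trailing elements of every page hold
 * "next page" pointers that chain the pages of each ring into a circle.
 */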
4447static void bnx2x_init_rx_rings(struct bnx2x *bp)
4448{
4449 	int func = BP_FUNC(bp);
4450 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4451 ETH_MAX_AGGREGATION_QUEUES_E1H;
4452 u16 ring_prod, cqe_ring_prod;
4453 	int i, j;
4454 
4455 	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4456 DP(NETIF_MSG_IFUP,
4457 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4458 
4459 	if (bp->flags & TPA_ENABLE_FLAG) {
4460 
4461 		for_each_rx_queue(bp, j) {
4462 			struct bnx2x_fastpath *fp = &bp->fp[j];
4463 
4464 			for (i = 0; i < max_agg_queues; i++) {
4465 fp->tpa_pool[i].skb =
4466 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4467 if (!fp->tpa_pool[i].skb) {
4468 BNX2X_ERR("Failed to allocate TPA "
4469 "skb pool for queue[%d] - "
4470 "disabling TPA on this "
4471 "queue!\n", j);
4472 bnx2x_free_tpa_pool(bp, fp, i);
4473 fp->disable_tpa = 1;
4474 break;
4475 }
4476 pci_unmap_addr_set((struct sw_rx_bd *)
4477 &bp->fp->tpa_pool[i],
4478 mapping, 0);
4479 fp->tpa_state[i] = BNX2X_TPA_STOP;
4480 }
4481 }
4482 }
4483
4484 	for_each_rx_queue(bp, j) {
4485 struct bnx2x_fastpath *fp = &bp->fp[j];
4486
4487 fp->rx_bd_cons = 0;
4488 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4489 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4490
4491 /* "next page" elements initialization */
4492 /* SGE ring */
4493 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4494 struct eth_rx_sge *sge;
4495
4496 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4497 sge->addr_hi =
4498 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4499 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4500 sge->addr_lo =
4501 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4502 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4503 }
4504
4505 bnx2x_init_sge_ring_bit_mask(fp);
4506 
4507 		/* RX BD ring */
4508 for (i = 1; i <= NUM_RX_RINGS; i++) {
4509 struct eth_rx_bd *rx_bd;
4510
4511 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4512 rx_bd->addr_hi =
4513 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4514 				BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4515 rx_bd->addr_lo =
4516 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4517 				BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4518 }
4519
4520 		/* CQ ring */
4521 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4522 struct eth_rx_cqe_next_page *nextpg;
4523
4524 nextpg = (struct eth_rx_cqe_next_page *)
4525 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4526 nextpg->addr_hi =
4527 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4528 				BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4529 nextpg->addr_lo =
4530 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4531 				BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4532 }
4533
4534 /* Allocate SGEs and initialize the ring elements */
4535 for (i = 0, ring_prod = 0;
4536 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4537 
4538 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4539 BNX2X_ERR("was only able to allocate "
4540 "%d rx sges\n", i);
4541 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4542 /* Cleanup already allocated elements */
4543 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4544 				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4545 fp->disable_tpa = 1;
4546 ring_prod = 0;
4547 break;
4548 }
4549 ring_prod = NEXT_SGE_IDX(ring_prod);
4550 }
4551 fp->rx_sge_prod = ring_prod;
4552
4553 /* Allocate BDs and initialize BD ring */
4554 		fp->rx_comp_cons = 0;
4555 		cqe_ring_prod = ring_prod = 0;
4556 for (i = 0; i < bp->rx_ring_size; i++) {
4557 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4558 BNX2X_ERR("was only able to allocate "
4559 "%d rx skbs on queue[%d]\n", i, j);
4560 fp->eth_q_stats.rx_skb_alloc_failed++;
4561 break;
4562 }
4563 ring_prod = NEXT_RX_IDX(ring_prod);
4564 			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4565 			WARN_ON(ring_prod <= i);
4566 }
4567
4568 fp->rx_bd_prod = ring_prod;
4569 /* must not have more available CQEs than BDs */
4570 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4571 cqe_ring_prod);
4572 fp->rx_pkt = fp->rx_calls = 0;
4573
4574 		/* Warning!
4575 		 * This will generate an interrupt (to the TSTORM);
4576 		 * it must only be done after the chip is initialized.
4577 		 */
4578 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4579 fp->rx_sge_prod);
4580 if (j != 0)
4581 continue;
4582
4583 REG_WR(bp, BAR_USTRORM_INTMEM +
4584 		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4585 U64_LO(fp->rx_comp_mapping));
4586 REG_WR(bp, BAR_USTRORM_INTMEM +
4587 		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4588 U64_HI(fp->rx_comp_mapping));
4589 }
4590}
4591
4592static void bnx2x_init_tx_ring(struct bnx2x *bp)
4593{
4594 int i, j;
4595
4596 	for_each_tx_queue(bp, j) {
4597 struct bnx2x_fastpath *fp = &bp->fp[j];
4598
4599 for (i = 1; i <= NUM_TX_RINGS; i++) {
4600 struct eth_tx_bd *tx_bd =
4601 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4602
4603 tx_bd->addr_hi =
4604 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4605 				BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4606 tx_bd->addr_lo =
4607 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4608 				BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4609 }
4610
4611 fp->tx_pkt_prod = 0;
4612 fp->tx_pkt_cons = 0;
4613 fp->tx_bd_prod = 0;
4614 fp->tx_bd_cons = 0;
4615 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4616 fp->tx_pkt = 0;
4617 }
4618}
4619
4620static void bnx2x_init_sp_ring(struct bnx2x *bp)
4621{
4622 	int func = BP_FUNC(bp);
4623
4624 spin_lock_init(&bp->spq_lock);
4625
4626 bp->spq_left = MAX_SPQ_PENDING;
4627 bp->spq_prod_idx = 0;
4628 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4629 bp->spq_prod_bd = bp->spq;
4630 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4631
4632 	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4633 	       U64_LO(bp->spq_mapping));
4634 REG_WR(bp,
4635 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4636 U64_HI(bp->spq_mapping));
4637
4638 	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4639 bp->spq_prod_idx);
4640}
4641
4642static void bnx2x_init_context(struct bnx2x *bp)
4643{
4644 int i;
4645
4646 for_each_queue(bp, i) {
4647 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4648 struct bnx2x_fastpath *fp = &bp->fp[i];
4649 		u8 cl_id = fp->cl_id;
4650 		u8 sb_id = fp->sb_id;
4651 
4652 context->ustorm_st_context.common.sb_index_numbers =
4653 BNX2X_RX_SB_INDEX_NUM;
4654 		context->ustorm_st_context.common.clientId = cl_id;
4655 context->ustorm_st_context.common.status_block_id = sb_id;
4656 context->ustorm_st_context.common.flags =
4657 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4658 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4659 context->ustorm_st_context.common.statistics_counter_id =
4660 cl_id;
4661 		context->ustorm_st_context.common.mc_alignment_log_size =
4662 						BNX2X_RX_ALIGN_SHIFT;
4663 		context->ustorm_st_context.common.bd_buff_size =
4664 						bp->rx_buf_size;
4665 		context->ustorm_st_context.common.bd_page_base_hi =
4666 						U64_HI(fp->rx_desc_mapping);
4667 		context->ustorm_st_context.common.bd_page_base_lo =
4668 						U64_LO(fp->rx_desc_mapping);
4669 if (!fp->disable_tpa) {
4670 context->ustorm_st_context.common.flags |=
4671 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4672 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4673 context->ustorm_st_context.common.sge_buff_size =
4674 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4675 (u32)0xffff);
4676 context->ustorm_st_context.common.sge_page_base_hi =
4677 U64_HI(fp->rx_sge_mapping);
4678 context->ustorm_st_context.common.sge_page_base_lo =
4679 U64_LO(fp->rx_sge_mapping);
4680 }
4681
4682 context->ustorm_ag_context.cdu_usage =
4683 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4684 CDU_REGION_NUMBER_UCM_AG,
4685 ETH_CONNECTION_TYPE);
4686
4687 context->xstorm_st_context.tx_bd_page_base_hi =
4688 U64_HI(fp->tx_desc_mapping);
4689 context->xstorm_st_context.tx_bd_page_base_lo =
4690 U64_LO(fp->tx_desc_mapping);
4691 context->xstorm_st_context.db_data_addr_hi =
4692 U64_HI(fp->tx_prods_mapping);
4693 context->xstorm_st_context.db_data_addr_lo =
4694 U64_LO(fp->tx_prods_mapping);
4695 		context->xstorm_st_context.statistics_data = (cl_id |
4696 				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4697 		context->cstorm_st_context.sb_index_number =
4698 						C_SB_ETH_TX_CQ_INDEX;
4699 		context->cstorm_st_context.status_block_id = sb_id;
4700
4701 context->xstorm_ag_context.cdu_reserved =
4702 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4703 CDU_REGION_NUMBER_XCM_AG,
4704 ETH_CONNECTION_TYPE);
4705 }
4706}
4707
4708static void bnx2x_init_ind_table(struct bnx2x *bp)
4709{
4710 	int func = BP_FUNC(bp);
4711 int i;
4712
4713 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4714 return;
4715
4716 DP(NETIF_MSG_IFUP,
4717 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4718 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4719 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
4720 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4721 			bp->fp->cl_id + (i % bp->num_rx_queues));
4722}
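/* RSS indirection: each of the TSTORM_INDIRECTION_TABLE_SIZE entries
 * maps one hash result to a client id, spread round-robin across the
 * rx queues starting from the leading client (bp->fp->cl_id).
 */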
4723
4724static void bnx2x_set_client_config(struct bnx2x *bp)
4725{
4726 	struct tstorm_eth_client_config tstorm_client = {0};
4727 int port = BP_PORT(bp);
4728 int i;
4729 
4730 	tstorm_client.mtu = bp->dev->mtu;
4731 	tstorm_client.config_flags =
4732 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4733 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4734 #ifdef BCM_VLAN
4735 	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4736 		tstorm_client.config_flags |=
4737 				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4738 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4739 }
4740#endif
4741 
4742 if (bp->flags & TPA_ENABLE_FLAG) {
4743 tstorm_client.max_sges_for_packet =
4744 			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4745 tstorm_client.max_sges_for_packet =
4746 ((tstorm_client.max_sges_for_packet +
4747 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4748 PAGES_PER_SGE_SHIFT;
4749
4750 tstorm_client.config_flags |=
4751 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4752 }
4753
4754 	for_each_queue(bp, i) {
4755 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4756
4757 		REG_WR(bp, BAR_TSTRORM_INTMEM +
4758 		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4759 ((u32 *)&tstorm_client)[0]);
4760 REG_WR(bp, BAR_TSTRORM_INTMEM +
4761 		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4762 ((u32 *)&tstorm_client)[1]);
4763 }
4764
4765 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4766 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4767}
4768
4769static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4770{
4771 	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4772 int mode = bp->rx_mode;
4773 int mask = (1 << BP_L_ID(bp));
4774 int func = BP_FUNC(bp);
a2fbb9ea
ET
4775 int i;
4776
4777 	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4778
4779 switch (mode) {
4780 case BNX2X_RX_MODE_NONE: /* no Rx */
4781 tstorm_mac_filter.ucast_drop_all = mask;
4782 tstorm_mac_filter.mcast_drop_all = mask;
4783 tstorm_mac_filter.bcast_drop_all = mask;
4784 break;
4785 case BNX2X_RX_MODE_NORMAL:
4786 		tstorm_mac_filter.bcast_accept_all = mask;
4787 break;
4788 case BNX2X_RX_MODE_ALLMULTI:
4789 tstorm_mac_filter.mcast_accept_all = mask;
4790 tstorm_mac_filter.bcast_accept_all = mask;
4791 break;
4792 case BNX2X_RX_MODE_PROMISC:
4793 tstorm_mac_filter.ucast_accept_all = mask;
4794 tstorm_mac_filter.mcast_accept_all = mask;
4795 tstorm_mac_filter.bcast_accept_all = mask;
4796 break;
4797 default:
4798 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4799 break;
4800 }
4801
4802 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4803 REG_WR(bp, BAR_TSTRORM_INTMEM +
4804 		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4805 ((u32 *)&tstorm_mac_filter)[i]);
4806
4807 /*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4808 ((u32 *)&tstorm_mac_filter)[i]); */
4809 }
4810 
4811 if (mode != BNX2X_RX_MODE_NONE)
4812 bnx2x_set_client_config(bp);
4813}
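/* Summary of the filter masks programmed above:
 *	NONE     - drop_all set for ucast/mcast/bcast
 *	NORMAL   - accept_all set for bcast only
 *	ALLMULTI - accept_all set for mcast and bcast
 *	PROMISC  - accept_all set for ucast, mcast and bcast
 */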
4814
4815static void bnx2x_init_internal_common(struct bnx2x *bp)
4816{
4817 int i;
4818
4819 if (bp->flags & TPA_ENABLE_FLAG) {
4820 struct tstorm_eth_tpa_exist tpa = {0};
4821
4822 tpa.tpa_exist = 1;
4823
4824 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4825 ((u32 *)&tpa)[0]);
4826 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4827 ((u32 *)&tpa)[1]);
4828 }
4829
4830 /* Zero this manually as its initialization is
4831 currently missing in the initTool */
4832 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4833 REG_WR(bp, BAR_USTRORM_INTMEM +
4834 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4835}
4836
4837static void bnx2x_init_internal_port(struct bnx2x *bp)
4838{
4839 int port = BP_PORT(bp);
4840
4841 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4842 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4843 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4844 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4845}
4846
4847 /* Calculates the sum of vn_min_rates.
4848    It's needed for further normalizing of the min_rates.
4849    Returns:
4850      sum of vn_min_rates.
4851        or
4852      0 - if all the min_rates are 0.
4853    In the latter case the fairness algorithm should be deactivated.
4854    If not all min_rates are zero then those that are zeroes will be set to 1.
4855  */
4856static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4857{
4858 int all_zero = 1;
4859 int port = BP_PORT(bp);
4860 int vn;
4861
4862 bp->vn_weight_sum = 0;
4863 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4864 int func = 2*vn + port;
4865 u32 vn_cfg =
4866 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4867 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4868 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4869
4870 /* Skip hidden vns */
4871 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4872 continue;
4873
4874 /* If min rate is zero - set it to 1 */
4875 if (!vn_min_rate)
4876 vn_min_rate = DEF_MIN_RATE;
4877 else
4878 all_zero = 0;
4879
4880 bp->vn_weight_sum += vn_min_rate;
4881 }
4882
4883 /* ... only if all min rates are zeros - disable fairness */
4884 if (all_zero)
4885 bp->vn_weight_sum = 0;
4886}
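/* Worked example, assuming DEF_MIN_RATE is 100: configured min-BW
 * values {0, 25, 0, 75} across four vns become vn_min_rates
 * {100, 2500, 100, 7500}, giving vn_weight_sum = 10200.  Had all four
 * been zero, vn_weight_sum would remain 0 and fairness would be
 * deactivated.
 */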
4887
4888 static void bnx2x_init_internal_func(struct bnx2x *bp)
4889 {
4890 struct tstorm_eth_function_common_config tstorm_config = {0};
4891 struct stats_indication_flags stats_flags = {0};
4892 int port = BP_PORT(bp);
4893 int func = BP_FUNC(bp);
4894 int i, j;
4895 u32 offset;
4896 	u16 max_agg_size;
4897
4898 if (is_multi(bp)) {
4899 		tstorm_config.config_flags = MULTI_FLAGS(bp);
4900 tstorm_config.rss_result_mask = MULTI_MASK;
4901 }
4902 if (IS_E1HMF(bp))
4903 tstorm_config.config_flags |=
4904 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4905 
4906 tstorm_config.leading_client_id = BP_L_ID(bp);
4907
4908 	REG_WR(bp, BAR_TSTRORM_INTMEM +
4909 	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4910 (*(u32 *)&tstorm_config));
4911
4912 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4913 bnx2x_set_storm_rx_mode(bp);
4914
4915 for_each_queue(bp, i) {
4916 u8 cl_id = bp->fp[i].cl_id;
4917
4918 /* reset xstorm per client statistics */
4919 offset = BAR_XSTRORM_INTMEM +
4920 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4921 for (j = 0;
4922 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4923 REG_WR(bp, offset + j*4, 0);
4924
4925 /* reset tstorm per client statistics */
4926 offset = BAR_TSTRORM_INTMEM +
4927 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4928 for (j = 0;
4929 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4930 REG_WR(bp, offset + j*4, 0);
4931
4932 /* reset ustorm per client statistics */
4933 offset = BAR_USTRORM_INTMEM +
4934 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4935 for (j = 0;
4936 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4937 REG_WR(bp, offset + j*4, 0);
4938 }
4939
4940 /* Init statistics related context */
4941 	stats_flags.collect_eth = 1;
4942 
4943 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4944 	       ((u32 *)&stats_flags)[0]);
4945 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4946 ((u32 *)&stats_flags)[1]);
4947
66e855f3 4948 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4949 ((u32 *)&stats_flags)[0]);
66e855f3 4950 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4951 ((u32 *)&stats_flags)[1]);
4952
de832a55
EG
4953 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4954 ((u32 *)&stats_flags)[0]);
4955 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4956 ((u32 *)&stats_flags)[1]);
4957
66e855f3 4958 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4959 ((u32 *)&stats_flags)[0]);
66e855f3 4960 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4961 ((u32 *)&stats_flags)[1]);
4962
66e855f3
YG
4963 REG_WR(bp, BAR_XSTRORM_INTMEM +
4964 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4965 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4966 REG_WR(bp, BAR_XSTRORM_INTMEM +
4967 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4968 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4969
4970 REG_WR(bp, BAR_TSTRORM_INTMEM +
4971 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4972 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4973 REG_WR(bp, BAR_TSTRORM_INTMEM +
4974 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4975 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4976
de832a55
EG
4977 REG_WR(bp, BAR_USTRORM_INTMEM +
4978 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4979 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4980 REG_WR(bp, BAR_USTRORM_INTMEM +
4981 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4982 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4983
34f80b04
EG
4984 if (CHIP_IS_E1H(bp)) {
4985 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4986 IS_E1HMF(bp));
4987 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4988 IS_E1HMF(bp));
4989 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4990 IS_E1HMF(bp));
4991 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4992 IS_E1HMF(bp));
4993
7a9b2557
VZ
4994 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4995 bp->e1hov);
34f80b04
EG
4996 }
4997
4f40f2cb
EG
4998 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4999 max_agg_size =
5000 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5001 SGE_PAGE_SIZE * PAGES_PER_SGE),
5002 (u32)0xffff);
555f6c78 5003 for_each_rx_queue(bp, i) {
7a9b2557 5004 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5005
5006 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5007 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5008 U64_LO(fp->rx_comp_mapping));
5009 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5010 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5011 U64_HI(fp->rx_comp_mapping));
5012
7a9b2557 5013 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5014 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5015 max_agg_size);
5016 }
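/*
 * Worked example (editorial note, page values are assumptions for
 * illustration): with a 4KB SGE page and PAGES_PER_SGE == 1,
 * min(8, MAX_SKB_FRAGS) * 4096 * 1 = 32KB fits in u16; with
 * PAGES_PER_SGE == 2 the product is 64KB, so the outer min()
 * against 0xffff clamps max_agg_size.
 */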
8a1c38d1 5017
1c06328c
EG
5018 /* dropless flow control */
5019 if (CHIP_IS_E1H(bp)) {
5020 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5021
5022 rx_pause.bd_thr_low = 250;
5023 rx_pause.cqe_thr_low = 250;
5024 rx_pause.cos = 1;
5025 rx_pause.sge_thr_low = 0;
5026 rx_pause.bd_thr_high = 350;
5027 rx_pause.cqe_thr_high = 350;
5028 rx_pause.sge_thr_high = 0;
5029
5030 for_each_rx_queue(bp, i) {
5031 struct bnx2x_fastpath *fp = &bp->fp[i];
5032
5033 if (!fp->disable_tpa) {
5034 rx_pause.sge_thr_low = 150;
5035 rx_pause.sge_thr_high = 250;
5036 }
5037
5038
5039 offset = BAR_USTRORM_INTMEM +
5040 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5041 fp->cl_id);
5042 for (j = 0;
5043 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5044 j++)
5045 REG_WR(bp, offset + j*4,
5046 ((u32 *)&rx_pause)[j]);
5047 }
5048 }
5049
8a1c38d1
EG
5050 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5051
5052 /* Init rate shaping and fairness contexts */
5053 if (IS_E1HMF(bp)) {
5054 int vn;
5055
5056 /* During init there is no active link;
5057 until link is up, set the link rate to 10Gbps */
5058 bp->link_vars.line_speed = SPEED_10000;
5059 bnx2x_init_port_minmax(bp);
5060
5061 bnx2x_calc_vn_weight_sum(bp);
5062
5063 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5064 bnx2x_init_vn_minmax(bp, 2*vn + port);
5065
5066 /* Enable rate shaping and fairness */
5067 bp->cmng.flags.cmng_enables =
5068 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5069 if (bp->vn_weight_sum)
5070 bp->cmng.flags.cmng_enables |=
5071 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5072 else
5073 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
5074 " fairness will be disabled\n");
5075 } else {
5076 /* rate shaping and fairness are disabled */
5077 DP(NETIF_MSG_IFUP,
5078 "single function mode - minmax will be disabled\n");
5079 }
5080
5081
5082 /* Store it to internal memory */
5083 if (bp->port.pmf)
5084 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5085 REG_WR(bp, BAR_XSTRORM_INTMEM +
5086 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5087 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5088}
5089
471de716
EG
5090static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5091{
5092 switch (load_code) {
5093 case FW_MSG_CODE_DRV_LOAD_COMMON:
5094 bnx2x_init_internal_common(bp);
5095 /* no break */
5096
5097 case FW_MSG_CODE_DRV_LOAD_PORT:
5098 bnx2x_init_internal_port(bp);
5099 /* no break */
5100
5101 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5102 bnx2x_init_internal_func(bp);
5103 break;
5104
5105 default:
5106 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5107 break;
5108 }
5109}
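/*
 * Editorial note: the missing breaks above are deliberate - the switch
 * cascades so each load code runs its own init plus everything below it:
 *
 *   FW_MSG_CODE_DRV_LOAD_COMMON   -> common + port + function
 *   FW_MSG_CODE_DRV_LOAD_PORT     -> port + function
 *   FW_MSG_CODE_DRV_LOAD_FUNCTION -> function only
 */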
5110
5111static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5112{
5113 int i;
5114
5115 for_each_queue(bp, i) {
5116 struct bnx2x_fastpath *fp = &bp->fp[i];
5117
34f80b04 5118 fp->bp = bp;
a2fbb9ea 5119 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5120 fp->index = i;
34f80b04
EG
5121 fp->cl_id = BP_L_ID(bp) + i;
5122 fp->sb_id = fp->cl_id;
5123 DP(NETIF_MSG_IFUP,
5124 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
0626b899 5125 bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
5c862848 5126 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5127 fp->sb_id);
5c862848 5128 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5129 }
5130
5c862848
EG
5131 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5132 DEF_SB_ID);
5133 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5134 bnx2x_update_coalesce(bp);
5135 bnx2x_init_rx_rings(bp);
5136 bnx2x_init_tx_ring(bp);
5137 bnx2x_init_sp_ring(bp);
5138 bnx2x_init_context(bp);
471de716 5139 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5140 bnx2x_init_ind_table(bp);
0ef00459
EG
5141 bnx2x_stats_init(bp);
5142
5143 /* At this point, we are ready for interrupts */
5144 atomic_set(&bp->intr_sem, 0);
5145
5146 /* flush all before enabling interrupts */
5147 mb();
5148 mmiowb();
5149
615f8fd9 5150 bnx2x_int_enable(bp);
a2fbb9ea
ET
5151}
5152
5153/* end of nic init */
5154
5155/*
5156 * gzip service functions
5157 */
5158
5159static int bnx2x_gunzip_init(struct bnx2x *bp)
5160{
5161 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5162 &bp->gunzip_mapping);
5163 if (bp->gunzip_buf == NULL)
5164 goto gunzip_nomem1;
5165
5166 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5167 if (bp->strm == NULL)
5168 goto gunzip_nomem2;
5169
5170 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5171 GFP_KERNEL);
5172 if (bp->strm->workspace == NULL)
5173 goto gunzip_nomem3;
5174
5175 return 0;
5176
5177gunzip_nomem3:
5178 kfree(bp->strm);
5179 bp->strm = NULL;
5180
5181gunzip_nomem2:
5182 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5183 bp->gunzip_mapping);
5184 bp->gunzip_buf = NULL;
5185
5186gunzip_nomem1:
5187 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5188 " decompression\n", bp->dev->name);
a2fbb9ea
ET
5189 return -ENOMEM;
5190}
5191
5192static void bnx2x_gunzip_end(struct bnx2x *bp)
5193{
5194 kfree(bp->strm->workspace);
5195
5196 kfree(bp->strm);
5197 bp->strm = NULL;
5198
5199 if (bp->gunzip_buf) {
5200 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5201 bp->gunzip_mapping);
5202 bp->gunzip_buf = NULL;
5203 }
5204}
5205
5206static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5207{
5208 int n, rc;
5209
5210 /* check gzip header */
5211 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5212 return -EINVAL;
5213
5214 n = 10;
5215
34f80b04 5216#define FNAME 0x8
a2fbb9ea
ET
5217
5218 if (zbuf[3] & FNAME)
5219 while ((zbuf[n++] != 0) && (n < len));
5220
5221 bp->strm->next_in = zbuf + n;
5222 bp->strm->avail_in = len - n;
5223 bp->strm->next_out = bp->gunzip_buf;
5224 bp->strm->avail_out = FW_BUF_SIZE;
5225
5226 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5227 if (rc != Z_OK)
5228 return rc;
5229
5230 rc = zlib_inflate(bp->strm, Z_FINISH);
5231 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5232 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5233 bp->dev->name, bp->strm->msg);
5234
5235 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5236 if (bp->gunzip_outlen & 0x3)
5237 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5238 " gunzip_outlen (%d) not aligned\n",
5239 bp->dev->name, bp->gunzip_outlen);
5240 bp->gunzip_outlen >>= 2;
5241
5242 zlib_inflateEnd(bp->strm);
5243
5244 if (rc == Z_STREAM_END)
5245 return 0;
5246
5247 return rc;
5248}
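/*
 * Editorial example - not part of the driver.  The header handling
 * above follows RFC 1952: a gzip member starts with the magic bytes
 * 0x1f 0x8b, a compression-method byte (8 = deflate) and a flag byte;
 * when FLG.FNAME (bit 3) is set, a NUL-terminated file name follows
 * the 10-byte fixed header.  A standalone sketch of the offset
 * computation:
 */
static int example_gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;				/* fixed gzip header */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
		return -EINVAL;

	if (zbuf[3] & 0x8)			/* FNAME is set */
		while ((n < len) && (zbuf[n++] != 0))
			;			/* skip the file name */

	return n;	/* offset of the raw deflate stream */
}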
5249
5250/* nic load/unload */
5251
5252/*
34f80b04 5253 * General service functions
a2fbb9ea
ET
5254 */
5255
5256/* send a NIG loopback debug packet */
5257static void bnx2x_lb_pckt(struct bnx2x *bp)
5258{
a2fbb9ea 5259 u32 wb_write[3];
a2fbb9ea
ET
5260
5261 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5262 wb_write[0] = 0x55555555;
5263 wb_write[1] = 0x55555555;
34f80b04 5264 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5265 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5266
5267 /* NON-IP protocol */
a2fbb9ea
ET
5268 wb_write[0] = 0x09000000;
5269 wb_write[1] = 0x55555555;
34f80b04 5270 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5271 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5272}
5273
5274/* some of the internal memories
5275 * are not directly readable from the driver;
5276 * to test them we send debug packets
5277 */
5278static int bnx2x_int_mem_test(struct bnx2x *bp)
5279{
5280 int factor;
5281 int count, i;
5282 u32 val = 0;
5283
ad8d3948 5284 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5285 factor = 120;
ad8d3948
EG
5286 else if (CHIP_REV_IS_EMUL(bp))
5287 factor = 200;
5288 else
a2fbb9ea 5289 factor = 1;
a2fbb9ea
ET
5290
5291 DP(NETIF_MSG_HW, "start part1\n");
5292
5293 /* Disable inputs of parser neighbor blocks */
5294 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5295 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5296 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5297 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5298
5299 /* Write 0 to parser credits for CFC search request */
5300 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5301
5302 /* send Ethernet packet */
5303 bnx2x_lb_pckt(bp);
5304
5305 /* TODO: do we reset the NIG statistics here? */
5306 /* Wait until NIG register shows 1 packet of size 0x10 */
5307 count = 1000 * factor;
5308 while (count) {
34f80b04 5309
a2fbb9ea
ET
5310 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5311 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5312 if (val == 0x10)
5313 break;
5314
5315 msleep(10);
5316 count--;
5317 }
5318 if (val != 0x10) {
5319 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5320 return -1;
5321 }
5322
5323 /* Wait until PRS register shows 1 packet */
5324 count = 1000 * factor;
5325 while (count) {
5326 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5327 if (val == 1)
5328 break;
5329
5330 msleep(10);
5331 count--;
5332 }
5333 if (val != 0x1) {
5334 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5335 return -2;
5336 }
5337
5338 /* Reset and init BRB, PRS */
34f80b04 5339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5340 msleep(50);
34f80b04 5341 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5342 msleep(50);
5343 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5344 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5345
5346 DP(NETIF_MSG_HW, "part2\n");
5347
5348 /* Disable inputs of parser neighbor blocks */
5349 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5350 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5351 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5352 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5353
5354 /* Write 0 to parser credits for CFC search request */
5355 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5356
5357 /* send 10 Ethernet packets */
5358 for (i = 0; i < 10; i++)
5359 bnx2x_lb_pckt(bp);
5360
5361 /* Wait until NIG register shows 10 + 1
5362 packets of size 11*0x10 = 0xb0 */
5363 count = 1000 * factor;
5364 while (count) {
34f80b04 5365
a2fbb9ea
ET
5366 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5367 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5368 if (val == 0xb0)
5369 break;
5370
5371 msleep(10);
5372 count--;
5373 }
5374 if (val != 0xb0) {
5375 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5376 return -3;
5377 }
5378
5379 /* Wait until PRS register shows 2 packets */
5380 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5381 if (val != 2)
5382 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5383
5384 /* Write 1 to parser credits for CFC search request */
5385 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5386
5387 /* Wait until PRS register shows 3 packets */
5388 msleep(10 * factor);
5389 /* (meanwhile the NIG should show 1 more packet of size 0x10) */
5390 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5391 if (val != 3)
5392 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5393
5394 /* clear NIG EOP FIFO */
5395 for (i = 0; i < 11; i++)
5396 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5397 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5398 if (val != 1) {
5399 BNX2X_ERR("clear of NIG failed\n");
5400 return -4;
5401 }
5402
5403 /* Reset and init BRB, PRS, NIG */
5404 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5405 msleep(50);
5406 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5407 msleep(50);
5408 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5409 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5410#ifndef BCM_ISCSI
5411 /* set NIC mode */
5412 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5413#endif
5414
5415 /* Enable inputs of parser neighbor blocks */
5416 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5417 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5418 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5419 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5420
5421 DP(NETIF_MSG_HW, "done\n");
5422
5423 return 0; /* OK */
5424}
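/*
 * Editorial sketch - not part of the driver.  The waits above all
 * follow one pattern: poll a register until it reaches an expected
 * value or a platform-scaled timeout expires.  A generic helper for
 * the directly-readable registers might look like this:
 */
static int example_poll_reg(struct bnx2x *bp, u32 reg, u32 expected,
			    int count)
{
	u32 val = 0;

	while (count--) {
		val = REG_RD(bp, reg);
		if (val == expected)
			return 0;
		msleep(10);
	}
	BNX2X_ERR("poll timeout: reg 0x%x val 0x%x\n", reg, val);
	return -EBUSY;
}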
5425
5426static void enable_blocks_attention(struct bnx2x *bp)
5427{
5428 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5429 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5430 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5431 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5432 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5433 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5434 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5435 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5436 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5437/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5438/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5439 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5440 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5441 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5442/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5443/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5444 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5445 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5446 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5447 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5448/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5449/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5450 if (CHIP_REV_IS_FPGA(bp))
5451 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5452 else
5453 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5454 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5455 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5456 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5457/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5458/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5459 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5460 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5461/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5462 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5463}
5464
34f80b04 5465
81f75bbf
EG
5466static void bnx2x_reset_common(struct bnx2x *bp)
5467{
5468 /* reset_common */
5469 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5470 0xd3ffff7f);
5471 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5472}
5473
34f80b04 5474static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5475{
a2fbb9ea 5476 u32 val, i;
a2fbb9ea 5477
34f80b04 5478 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5479
81f75bbf 5480 bnx2x_reset_common(bp);
34f80b04
EG
5481 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5482 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5483
34f80b04
EG
5484 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5485 if (CHIP_IS_E1H(bp))
5486 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5487
34f80b04
EG
5488 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5489 msleep(30);
5490 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5491
34f80b04
EG
5492 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5493 if (CHIP_IS_E1(bp)) {
5494 /* enable HW interrupt from PXP on USDM overflow
5495 bit 16 on INT_MASK_0 */
5496 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5497 }
a2fbb9ea 5498
34f80b04
EG
5499 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5500 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5501
5502#ifdef __BIG_ENDIAN
34f80b04
EG
5503 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5504 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5505 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5506 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5507 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5508 /* make sure this value is 0 */
5509 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5510
5511/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5512 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5513 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5514 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5515 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5516#endif
5517
34f80b04 5518 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5519#ifdef BCM_ISCSI
34f80b04
EG
5520 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5521 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5522 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5523#endif
5524
34f80b04
EG
5525 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5526 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5527
34f80b04
EG
5528 /* let the HW do its magic ... */
5529 msleep(100);
5530 /* finish PXP init */
5531 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5532 if (val != 1) {
5533 BNX2X_ERR("PXP2 CFG failed\n");
5534 return -EBUSY;
5535 }
5536 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5537 if (val != 1) {
5538 BNX2X_ERR("PXP2 RD_INIT failed\n");
5539 return -EBUSY;
5540 }
a2fbb9ea 5541
34f80b04
EG
5542 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5543 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5544
34f80b04 5545 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5546
34f80b04
EG
5547 /* clean the DMAE memory */
5548 bp->dmae_ready = 1;
5549 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5550
34f80b04
EG
5551 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5552 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5553 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5554 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5555
34f80b04
EG
5556 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5557 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5558 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5559 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5560
5561 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5562 /* soft reset pulse */
5563 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5564 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5565
5566#ifdef BCM_ISCSI
34f80b04 5567 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5568#endif
a2fbb9ea 5569
34f80b04
EG
5570 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5571 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5572 if (!CHIP_REV_IS_SLOW(bp)) {
5573 /* enable hw interrupt from doorbell Q */
5574 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5575 }
a2fbb9ea 5576
34f80b04 5577 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5578 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5579 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5580 /* set NIC mode */
5581 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5582 if (CHIP_IS_E1H(bp))
5583 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5584
34f80b04
EG
5585 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5586 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5587 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5588 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5589
34f80b04
EG
5590 if (CHIP_IS_E1H(bp)) {
5591 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5592 STORM_INTMEM_SIZE_E1H/2);
5593 bnx2x_init_fill(bp,
5594 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5595 0, STORM_INTMEM_SIZE_E1H/2);
5596 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5597 STORM_INTMEM_SIZE_E1H/2);
5598 bnx2x_init_fill(bp,
5599 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5600 0, STORM_INTMEM_SIZE_E1H/2);
5601 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5602 STORM_INTMEM_SIZE_E1H/2);
5603 bnx2x_init_fill(bp,
5604 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5605 0, STORM_INTMEM_SIZE_E1H/2);
5606 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5607 STORM_INTMEM_SIZE_E1H/2);
5608 bnx2x_init_fill(bp,
5609 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5610 0, STORM_INTMEM_SIZE_E1H/2);
5611 } else { /* E1 */
ad8d3948
EG
5612 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5613 STORM_INTMEM_SIZE_E1);
5614 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5615 STORM_INTMEM_SIZE_E1);
5616 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5617 STORM_INTMEM_SIZE_E1);
5618 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5619 STORM_INTMEM_SIZE_E1);
34f80b04 5620 }
a2fbb9ea 5621
34f80b04
EG
5622 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5623 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5624 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5625 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5626
34f80b04
EG
5627 /* sync semi rtc */
5628 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5629 0x80000000);
5630 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5631 0x80000000);
a2fbb9ea 5632
34f80b04
EG
5633 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5634 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5635 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5636
34f80b04
EG
5637 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5638 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5639 REG_WR(bp, i, 0xc0cac01a);
5640 /* TODO: replace with something meaningful */
5641 }
8d9c5f34 5642 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5643 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5644
34f80b04
EG
5645 if (sizeof(union cdu_context) != 1024)
5646 /* we currently assume that a context is 1024 bytes */
5647 printk(KERN_ALERT PFX "please adjust the size of"
5648 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5649
34f80b04
EG
5650 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5651 val = (4 << 24) + (0 << 12) + 1024;
5652 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5653 if (CHIP_IS_E1(bp)) {
5654 /* !!! fix pxp client credit until excel update */
5655 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5656 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5657 }
a2fbb9ea 5658
34f80b04
EG
5659 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5660 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5661 /* enable context validation interrupt from CFC */
5662 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5663
5664 /* set the thresholds to prevent CFC/CDU race */
5665 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5666
34f80b04
EG
5667 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5668 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5669
34f80b04
EG
5670 /* PXPCS COMMON comes here */
5671 /* Reset PCIE errors for debug */
5672 REG_WR(bp, 0x2814, 0xffffffff);
5673 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5674
34f80b04
EG
5675 /* EMAC0 COMMON comes here */
5676 /* EMAC1 COMMON comes here */
5677 /* DBU COMMON comes here */
5678 /* DBG COMMON comes here */
5679
5680 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5681 if (CHIP_IS_E1H(bp)) {
5682 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5683 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5684 }
5685
5686 if (CHIP_REV_IS_SLOW(bp))
5687 msleep(200);
5688
5689 /* finish CFC init */
5690 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5691 if (val != 1) {
5692 BNX2X_ERR("CFC LL_INIT failed\n");
5693 return -EBUSY;
5694 }
5695 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5696 if (val != 1) {
5697 BNX2X_ERR("CFC AC_INIT failed\n");
5698 return -EBUSY;
5699 }
5700 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5701 if (val != 1) {
5702 BNX2X_ERR("CFC CAM_INIT failed\n");
5703 return -EBUSY;
5704 }
5705 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5706
34f80b04
EG
5707 /* read NIG statistic
5708 to see if this is our first up since powerup */
5709 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5710 val = *bnx2x_sp(bp, wb_data[0]);
5711
5712 /* do internal memory self test */
5713 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5714 BNX2X_ERR("internal mem self test failed\n");
5715 return -EBUSY;
5716 }
5717
35b19ba5 5718 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5722 bp->port.need_hw_lock = 1;
5723 break;
5724
35b19ba5 5725 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
34f80b04
EG
5726 /* Fan failure is indicated by SPIO 5 */
5727 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5728 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5729
5730 /* set to active low mode */
5731 val = REG_RD(bp, MISC_REG_SPIO_INT);
5732 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5733 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5734 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5735
34f80b04
EG
5736 /* enable interrupt to signal the IGU */
5737 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5738 val |= (1 << MISC_REGISTERS_SPIO_5);
5739 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5740 break;
f1410647 5741
34f80b04
EG
5742 default:
5743 break;
5744 }
f1410647 5745
34f80b04
EG
5746 /* clear PXP2 attentions */
5747 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5748
34f80b04 5749 enable_blocks_attention(bp);
a2fbb9ea 5750
6bbca910
YR
5751 if (!BP_NOMCP(bp)) {
5752 bnx2x_acquire_phy_lock(bp);
5753 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5754 bnx2x_release_phy_lock(bp);
5755 } else
5756 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5757
34f80b04
EG
5758 return 0;
5759}
a2fbb9ea 5760
34f80b04
EG
5761static int bnx2x_init_port(struct bnx2x *bp)
5762{
5763 int port = BP_PORT(bp);
1c06328c 5764 u32 low, high;
34f80b04 5765 u32 val;
#ifdef BCM_ISCSI
 /* locals used only by the iSCSI blocks below; the initial ILT line
 * (Port0 starts at 0, Port1 at 384) is inferred from the comments
 * there and is an editorial reconstruction */
 int func = BP_FUNC(bp);
 int i = port ? 384 : 0;
 u32 wb_write[2];
#endif
a2fbb9ea 5766
34f80b04
EG
5767 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5768
5769 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5770
5771 /* Port PXP comes here */
5772 /* Port PXP2 comes here */
a2fbb9ea
ET
5773#ifdef BCM_ISCSI
5774 /* Port0 1
5775 * Port1 385 */
5776 i++;
5777 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5778 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5779 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5780 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5781
5782 /* Port0 2
5783 * Port1 386 */
5784 i++;
5785 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5786 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5787 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5788 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5789
5790 /* Port0 3
5791 * Port1 387 */
5792 i++;
5793 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5794 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5795 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5796 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5797#endif
34f80b04 5798 /* Port CMs come here */
8d9c5f34
EG
5799 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5800 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5801
5802 /* Port QM comes here */
a2fbb9ea
ET
5803#ifdef BCM_ISCSI
5804 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5805 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5806
5807 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5808 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5809#endif
5810 /* Port DQ comes here */
1c06328c
EG
5811
5812 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5813 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5814 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5815 /* no pause for emulation and FPGA */
5816 low = 0;
5817 high = 513;
5818 } else {
5819 if (IS_E1HMF(bp))
5820 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5821 else if (bp->dev->mtu > 4096) {
5822 if (bp->flags & ONE_PORT_FLAG)
5823 low = 160;
5824 else {
5825 val = bp->dev->mtu;
5826 /* (24*1024 + val*4)/256 */
5827 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5828 }
5829 } else
5830 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5831 high = low + 56; /* 14*1024/256 */
5832 }
5833 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5834 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
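/*
 * Worked example (editorial note): in single-function mode on a
 * two-port device with mtu = 9000, low = 96 + 9000/64 + 1 = 237 and
 * high = 237 + 56 = 293.  The thresholds are in 256-byte units, so
 * this matches the (24*1024 + mtu*4)/256 formula, rounded up.
 */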
5835
5836
ad8d3948 5837 /* Port PRS comes here */
a2fbb9ea
ET
5838 /* Port TSDM comes here */
5839 /* Port CSDM comes here */
5840 /* Port USDM comes here */
5841 /* Port XSDM comes here */
34f80b04
EG
5842 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5843 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5844 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5845 port ? USEM_PORT1_END : USEM_PORT0_END);
5846 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5847 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5848 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5849 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5850 /* Port UPB comes here */
34f80b04
EG
5851 /* Port XPB comes here */
5852
5853 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5854 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5855
5856 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5857 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5858
5859 /* update threshold */
34f80b04 5860 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5861 /* update init credit */
34f80b04 5862 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5863
5864 /* probe changes */
34f80b04 5865 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5866 msleep(5);
34f80b04 5867 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5868
5869#ifdef BCM_ISCSI
5870 /* tell the searcher where the T2 table is */
5871 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5872
5873 wb_write[0] = U64_LO(bp->t2_mapping);
5874 wb_write[1] = U64_HI(bp->t2_mapping);
5875 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5876 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5877 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5878 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5879
5880 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5881 /* Port SRCH comes here */
5882#endif
5883 /* Port CDU comes here */
5884 /* Port CFC comes here */
34f80b04
EG
5885
5886 if (CHIP_IS_E1(bp)) {
5887 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5888 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5889 }
5890 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5891 port ? HC_PORT1_END : HC_PORT0_END);
5892
5893 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5894 MISC_AEU_PORT0_START,
34f80b04
EG
5895 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5896 /* init aeu_mask_attn_func_0/1:
5897 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5898 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5899 * bits 4-7 are used for "per vn group attention" */
5900 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5901 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5902
a2fbb9ea
ET
5903 /* Port PXPCS comes here */
5904 /* Port EMAC0 comes here */
5905 /* Port EMAC1 comes here */
5906 /* Port DBU comes here */
5907 /* Port DBG comes here */
34f80b04
EG
5908 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5909 port ? NIG_PORT1_END : NIG_PORT0_END);
5910
5911 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5912
5913 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5914 /* 0x2 disable e1hov, 0x1 enable */
5915 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5916 (IS_E1HMF(bp) ? 0x1 : 0x2));
5917
1c06328c
EG
5918 /* support pause requests from USDM, TSDM and BRB */
5919 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5920
5921 {
5922 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5923 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5924 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5925 }
34f80b04
EG
5926 }
5927
a2fbb9ea
ET
5928 /* Port MCP comes here */
5929 /* Port DMAE comes here */
5930
35b19ba5 5931 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
5932 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5933 {
5934 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5935
5936 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5937 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5938
5939 /* The GPIO should be swapped if the swap register is
5940 set and active */
5941 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5942 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5943
5944 /* Select function upon port-swap configuration */
5945 if (port == 0) {
5946 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5947 aeu_gpio_mask = (swap_val && swap_override) ?
5948 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5949 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5950 } else {
5951 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5952 aeu_gpio_mask = (swap_val && swap_override) ?
5953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5954 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5955 }
5956 val = REG_RD(bp, offset);
5957 /* add GPIO3 to group */
5958 val |= aeu_gpio_mask;
5959 REG_WR(bp, offset, val);
5960 }
5961 break;
5962
35b19ba5 5963 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
5964 /* add SPIO 5 to group 0 */
5965 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5966 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5967 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5968 break;
5969
5970 default:
5971 break;
5972 }
5973
c18487ee 5974 bnx2x__link_reset(bp);
a2fbb9ea 5975
34f80b04
EG
5976 return 0;
5977}
5978
5979#define ILT_PER_FUNC (768/2)
5980#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5981/* the phys address is shifted right 12 bits and a
5982 1 (valid) bit is added at the 53rd bit;
5983 then, since this is a wide register(TM),
5984 we split it into two 32-bit writes
5985 */
5986#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5987#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5988#define PXP_ONE_ILT(x) (((x) << 10) | x)
5989#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5990
5991#define CNIC_ILT_LINES 0
5992
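/*
 * Worked example (editorial note): for addr = 0x123456000,
 * ONCHIP_ADDR1(addr) = 0x00123456 (the page-shifted address) and
 * ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100000 (the
 * upper bits with the valid bit set); the two halves are then
 * written through bnx2x_wb_wr() below.
 */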
5993static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5994{
5995 int reg;
5996
5997 if (CHIP_IS_E1H(bp))
5998 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5999 else /* E1 */
6000 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6001
6002 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6003}
6004
6005static int bnx2x_init_func(struct bnx2x *bp)
6006{
6007 int port = BP_PORT(bp);
6008 int func = BP_FUNC(bp);
8badd27a 6009 u32 addr, val;
34f80b04
EG
6010 int i;
6011
6012 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6013
8badd27a
EG
6014 /* set MSI reconfigure capability */
6015 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6016 val = REG_RD(bp, addr);
6017 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6018 REG_WR(bp, addr, val);
6019
34f80b04
EG
6020 i = FUNC_ILT_BASE(func);
6021
6022 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6023 if (CHIP_IS_E1H(bp)) {
6024 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6025 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6026 } else /* E1 */
6027 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6028 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6029
6030
6031 if (CHIP_IS_E1H(bp)) {
6032 for (i = 0; i < 9; i++)
6033 bnx2x_init_block(bp,
6034 cm_start[func][i], cm_end[func][i]);
6035
6036 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6037 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6038 }
6039
6040 /* HC init per function */
6041 if (CHIP_IS_E1H(bp)) {
6042 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6043
6044 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6045 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6046 }
6047 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6048
c14423fe 6049 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6050 REG_WR(bp, 0x2114, 0xffffffff);
6051 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6052
34f80b04
EG
6053 return 0;
6054}
6055
6056static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6057{
6058 int i, rc = 0;
a2fbb9ea 6059
34f80b04
EG
6060 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6061 BP_FUNC(bp), load_code);
a2fbb9ea 6062
34f80b04
EG
6063 bp->dmae_ready = 0;
6064 mutex_init(&bp->dmae_mutex);
6065 bnx2x_gunzip_init(bp);
a2fbb9ea 6066
34f80b04
EG
6067 switch (load_code) {
6068 case FW_MSG_CODE_DRV_LOAD_COMMON:
6069 rc = bnx2x_init_common(bp);
6070 if (rc)
6071 goto init_hw_err;
6072 /* no break */
6073
6074 case FW_MSG_CODE_DRV_LOAD_PORT:
6075 bp->dmae_ready = 1;
6076 rc = bnx2x_init_port(bp);
6077 if (rc)
6078 goto init_hw_err;
6079 /* no break */
6080
6081 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6082 bp->dmae_ready = 1;
6083 rc = bnx2x_init_func(bp);
6084 if (rc)
6085 goto init_hw_err;
6086 break;
6087
6088 default:
6089 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6090 break;
6091 }
6092
6093 if (!BP_NOMCP(bp)) {
6094 int func = BP_FUNC(bp);
a2fbb9ea
ET
6095
6096 bp->fw_drv_pulse_wr_seq =
34f80b04 6097 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6098 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6099 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6100 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6101 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6102 } else
6103 bp->func_stx = 0;
a2fbb9ea 6104
34f80b04
EG
6105 /* this needs to be done before gunzip end */
6106 bnx2x_zero_def_sb(bp);
6107 for_each_queue(bp, i)
6108 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6109
6110init_hw_err:
6111 bnx2x_gunzip_end(bp);
6112
6113 return rc;
a2fbb9ea
ET
6114}
6115
c14423fe 6116/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6117static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6118{
34f80b04 6119 int func = BP_FUNC(bp);
f1410647
ET
6120 u32 seq = ++bp->fw_seq;
6121 u32 rc = 0;
19680c48
EG
6122 u32 cnt = 1;
6123 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6124
34f80b04 6125 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6126 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6127
19680c48
EG
6128 do {
6129 /* let the FW do its magic ... */
6130 msleep(delay);
a2fbb9ea 6131
19680c48 6132 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6133
19680c48
EG
6134 /* Give the FW up to 2 seconds (200*10ms) */
6135 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6136
6137 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6138 cnt*delay, rc, seq);
a2fbb9ea
ET
6139
6140 /* is this a reply to our command? */
6141 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6142 rc &= FW_MSG_CODE_MASK;
f1410647 6143
a2fbb9ea
ET
6144 } else {
6145 /* FW BUG! */
6146 BNX2X_ERR("FW failed to respond!\n");
6147 bnx2x_fw_dump(bp);
6148 rc = 0;
6149 }
f1410647 6150
a2fbb9ea
ET
6151 return rc;
6152}
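/*
 * Usage note (editorial): callers pass a DRV_MSG_CODE_* request and get
 * back only the FW_MSG_CODE_* part of the reply - the helper writes
 * (command | seq) to the driver mailbox, polls the firmware mailbox
 * until the echoed sequence number matches, then masks the sequence
 * bits off.  A return value of 0 means the firmware never answered.
 */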
6153
6154static void bnx2x_free_mem(struct bnx2x *bp)
6155{
6156
6157#define BNX2X_PCI_FREE(x, y, size) \
6158 do { \
6159 if (x) { \
6160 pci_free_consistent(bp->pdev, size, x, y); \
6161 x = NULL; \
6162 y = 0; \
6163 } \
6164 } while (0)
6165
6166#define BNX2X_FREE(x) \
6167 do { \
6168 if (x) { \
6169 vfree(x); \
6170 x = NULL; \
6171 } \
6172 } while (0)
6173
6174 int i;
6175
6176 /* fastpath */
555f6c78 6177 /* Common */
a2fbb9ea
ET
6178 for_each_queue(bp, i) {
6179
555f6c78 6180 /* status blocks */
a2fbb9ea
ET
6181 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6182 bnx2x_fp(bp, i, status_blk_mapping),
6183 sizeof(struct host_status_block) +
6184 sizeof(struct eth_tx_db_data));
555f6c78
EG
6185 }
6186 /* Rx */
6187 for_each_rx_queue(bp, i) {
a2fbb9ea 6188
555f6c78 6189 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6190 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6191 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6192 bnx2x_fp(bp, i, rx_desc_mapping),
6193 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6194
6195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6196 bnx2x_fp(bp, i, rx_comp_mapping),
6197 sizeof(struct eth_fast_path_rx_cqe) *
6198 NUM_RCQ_BD);
a2fbb9ea 6199
7a9b2557 6200 /* SGE ring */
32626230 6201 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6202 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6203 bnx2x_fp(bp, i, rx_sge_mapping),
6204 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6205 }
555f6c78
EG
6206 /* Tx */
6207 for_each_tx_queue(bp, i) {
6208
6209 /* fastpath tx rings: tx_buf tx_desc */
6210 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6211 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6212 bnx2x_fp(bp, i, tx_desc_mapping),
6213 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6214 }
a2fbb9ea
ET
6215 /* end of fastpath */
6216
6217 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6218 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6219
6220 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6221 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6222
6223#ifdef BCM_ISCSI
6224 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6225 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6226 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6227 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6228#endif
7a9b2557 6229 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6230
6231#undef BNX2X_PCI_FREE
6232#undef BNX2X_FREE
6233}
6234
6235static int bnx2x_alloc_mem(struct bnx2x *bp)
6236{
6237
6238#define BNX2X_PCI_ALLOC(x, y, size) \
6239 do { \
6240 x = pci_alloc_consistent(bp->pdev, size, y); \
6241 if (x == NULL) \
6242 goto alloc_mem_err; \
6243 memset(x, 0, size); \
6244 } while (0)
6245
6246#define BNX2X_ALLOC(x, size) \
6247 do { \
6248 x = vmalloc(size); \
6249 if (x == NULL) \
6250 goto alloc_mem_err; \
6251 memset(x, 0, size); \
6252 } while (0)
6253
6254 int i;
6255
6256 /* fastpath */
555f6c78 6257 /* Common */
a2fbb9ea
ET
6258 for_each_queue(bp, i) {
6259 bnx2x_fp(bp, i, bp) = bp;
6260
555f6c78 6261 /* status blocks */
a2fbb9ea
ET
6262 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6263 &bnx2x_fp(bp, i, status_blk_mapping),
6264 sizeof(struct host_status_block) +
6265 sizeof(struct eth_tx_db_data));
555f6c78
EG
6266 }
6267 /* Rx */
6268 for_each_rx_queue(bp, i) {
a2fbb9ea 6269
555f6c78 6270 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6271 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6272 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6273 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6274 &bnx2x_fp(bp, i, rx_desc_mapping),
6275 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6276
6277 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6278 &bnx2x_fp(bp, i, rx_comp_mapping),
6279 sizeof(struct eth_fast_path_rx_cqe) *
6280 NUM_RCQ_BD);
6281
7a9b2557
VZ
6282 /* SGE ring */
6283 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6284 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6285 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6286 &bnx2x_fp(bp, i, rx_sge_mapping),
6287 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6288 }
555f6c78
EG
6289 /* Tx */
6290 for_each_tx_queue(bp, i) {
6291
6292 bnx2x_fp(bp, i, hw_tx_prods) =
6293 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6294
6295 bnx2x_fp(bp, i, tx_prods_mapping) =
6296 bnx2x_fp(bp, i, status_blk_mapping) +
6297 sizeof(struct host_status_block);
6298
6299 /* fastpath tx rings: tx_buf tx_desc */
6300 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6301 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6302 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6303 &bnx2x_fp(bp, i, tx_desc_mapping),
6304 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6305 }
a2fbb9ea
ET
6306 /* end of fastpath */
6307
6308 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6309 sizeof(struct host_def_status_block));
6310
6311 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6312 sizeof(struct bnx2x_slowpath));
6313
6314#ifdef BCM_ISCSI
6315 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6316
6317 /* Initialize T1 */
6318 for (i = 0; i < 64*1024; i += 64) {
6319 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6320 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6321 }
6322
6323 /* allocate the searcher T2 table;
6324 we allocate 1/4 of the alloc num for T2
6325 (which is not entered into the ILT) */
6326 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6327
6328 /* Initialize T2 */
6329 for (i = 0; i < 16*1024; i += 64)
6330 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6331
c14423fe 6332 /* now fix up the last line in the block to point to the next block */
a2fbb9ea
ET
6333 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6334
6335 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6336 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6337
6338 /* QM queues (128*MAX_CONN) */
6339 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6340#endif
6341
6342 /* Slow path ring */
6343 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6344
6345 return 0;
6346
6347alloc_mem_err:
6348 bnx2x_free_mem(bp);
6349 return -ENOMEM;
6350
6351#undef BNX2X_PCI_ALLOC
6352#undef BNX2X_ALLOC
6353}
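/*
 * Editorial note: the BNX2X_PCI_ALLOC/BNX2X_ALLOC macros jump to
 * alloc_mem_err on the first failure, where bnx2x_free_mem() unwinds
 * everything allocated so far.  This is safe because the matching
 * free macros check for NULL before releasing, so a partially built
 * fastpath tears down cleanly.
 */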
6354
6355static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6356{
6357 int i;
6358
555f6c78 6359 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6360 struct bnx2x_fastpath *fp = &bp->fp[i];
6361
6362 u16 bd_cons = fp->tx_bd_cons;
6363 u16 sw_prod = fp->tx_pkt_prod;
6364 u16 sw_cons = fp->tx_pkt_cons;
6365
a2fbb9ea
ET
6366 while (sw_cons != sw_prod) {
6367 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6368 sw_cons++;
6369 }
6370 }
6371}
6372
6373static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6374{
6375 int i, j;
6376
555f6c78 6377 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6378 struct bnx2x_fastpath *fp = &bp->fp[j];
6379
a2fbb9ea
ET
6380 for (i = 0; i < NUM_RX_BD; i++) {
6381 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6382 struct sk_buff *skb = rx_buf->skb;
6383
6384 if (skb == NULL)
6385 continue;
6386
6387 pci_unmap_single(bp->pdev,
6388 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6389 bp->rx_buf_size,
a2fbb9ea
ET
6390 PCI_DMA_FROMDEVICE);
6391
6392 rx_buf->skb = NULL;
6393 dev_kfree_skb(skb);
6394 }
7a9b2557 6395 if (!fp->disable_tpa)
32626230
EG
6396 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6397 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6398 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6399 }
6400}
6401
6402static void bnx2x_free_skbs(struct bnx2x *bp)
6403{
6404 bnx2x_free_tx_skbs(bp);
6405 bnx2x_free_rx_skbs(bp);
6406}
6407
6408static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6409{
34f80b04 6410 int i, offset = 1;
a2fbb9ea
ET
6411
6412 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6413 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6414 bp->msix_table[0].vector);
6415
6416 for_each_queue(bp, i) {
c14423fe 6417 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6418 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6419 bnx2x_fp(bp, i, state));
6420
34f80b04 6421 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6422 }
a2fbb9ea
ET
6423}
6424
6425static void bnx2x_free_irq(struct bnx2x *bp)
6426{
a2fbb9ea 6427 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6428 bnx2x_free_msix_irqs(bp);
6429 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6430 bp->flags &= ~USING_MSIX_FLAG;
6431
8badd27a
EG
6432 } else if (bp->flags & USING_MSI_FLAG) {
6433 free_irq(bp->pdev->irq, bp->dev);
6434 pci_disable_msi(bp->pdev);
6435 bp->flags &= ~USING_MSI_FLAG;
6436
a2fbb9ea
ET
6437 } else
6438 free_irq(bp->pdev->irq, bp->dev);
6439}
6440
6441static int bnx2x_enable_msix(struct bnx2x *bp)
6442{
8badd27a
EG
6443 int i, rc, offset = 1;
6444 int igu_vec = 0;
a2fbb9ea 6445
8badd27a
EG
6446 bp->msix_table[0].entry = igu_vec;
6447 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6448
34f80b04 6449 for_each_queue(bp, i) {
8badd27a 6450 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6451 bp->msix_table[i + offset].entry = igu_vec;
6452 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6453 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6454 }
6455
34f80b04 6456 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6457 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6458 if (rc) {
8badd27a
EG
6459 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6460 return rc;
34f80b04 6461 }
8badd27a 6462
a2fbb9ea
ET
6463 bp->flags |= USING_MSIX_FLAG;
6464
6465 return 0;
a2fbb9ea
ET
6466}
6467
a2fbb9ea
ET
6468static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6469{
34f80b04 6470 int i, rc, offset = 1;
a2fbb9ea 6471
a2fbb9ea
ET
6472 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6473 bp->dev->name, bp->dev);
a2fbb9ea
ET
6474 if (rc) {
6475 BNX2X_ERR("request sp irq failed\n");
6476 return -EBUSY;
6477 }
6478
6479 for_each_queue(bp, i) {
555f6c78
EG
6480 struct bnx2x_fastpath *fp = &bp->fp[i];
6481
6482 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6483 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6484 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6485 if (rc) {
555f6c78 6486 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6487 bnx2x_free_msix_irqs(bp);
6488 return -EBUSY;
6489 }
6490
555f6c78 6491 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6492 }
6493
555f6c78
EG
6494 i = BNX2X_NUM_QUEUES(bp);
6495 if (is_multi(bp))
6496 printk(KERN_INFO PFX
6497 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6498 bp->dev->name, bp->msix_table[0].vector,
6499 bp->msix_table[offset].vector,
6500 bp->msix_table[offset + i - 1].vector);
6501 else
6502 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6503 bp->dev->name, bp->msix_table[0].vector,
6504 bp->msix_table[offset + i - 1].vector);
6505
a2fbb9ea 6506 return 0;
a2fbb9ea
ET
6507}
6508
8badd27a
EG
6509static int bnx2x_enable_msi(struct bnx2x *bp)
6510{
6511 int rc;
6512
6513 rc = pci_enable_msi(bp->pdev);
6514 if (rc) {
6515 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6516 return -1;
6517 }
6518 bp->flags |= USING_MSI_FLAG;
6519
6520 return 0;
6521}
6522
a2fbb9ea
ET
6523static int bnx2x_req_irq(struct bnx2x *bp)
6524{
8badd27a 6525 unsigned long flags;
34f80b04 6526 int rc;
a2fbb9ea 6527
8badd27a
EG
6528 if (bp->flags & USING_MSI_FLAG)
6529 flags = 0;
6530 else
6531 flags = IRQF_SHARED;
6532
6533 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6534 bp->dev->name, bp->dev);
a2fbb9ea
ET
6535 if (!rc)
6536 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6537
6538 return rc;
a2fbb9ea
ET
6539}
6540
65abd74d
YG
6541static void bnx2x_napi_enable(struct bnx2x *bp)
6542{
6543 int i;
6544
555f6c78 6545 for_each_rx_queue(bp, i)
65abd74d
YG
6546 napi_enable(&bnx2x_fp(bp, i, napi));
6547}
6548
6549static void bnx2x_napi_disable(struct bnx2x *bp)
6550{
6551 int i;
6552
555f6c78 6553 for_each_rx_queue(bp, i)
65abd74d
YG
6554 napi_disable(&bnx2x_fp(bp, i, napi));
6555}
6556
6557static void bnx2x_netif_start(struct bnx2x *bp)
6558{
6559 if (atomic_dec_and_test(&bp->intr_sem)) {
6560 if (netif_running(bp->dev)) {
65abd74d
YG
6561 bnx2x_napi_enable(bp);
6562 bnx2x_int_enable(bp);
555f6c78
EG
6563 if (bp->state == BNX2X_STATE_OPEN)
6564 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6565 }
6566 }
6567}
6568
f8ef6e44 6569static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6570{
f8ef6e44 6571 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6572 bnx2x_napi_disable(bp);
65abd74d 6573 if (netif_running(bp->dev)) {
65abd74d
YG
6574 netif_tx_disable(bp->dev);
6575 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6576 }
6577}
6578
a2fbb9ea
ET
6579/*
6580 * Init service functions
6581 */
6582
3101c2bc 6583static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6584{
6585 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6586 int port = BP_PORT(bp);
a2fbb9ea
ET
6587
6588 /* CAM allocation
6589 * unicasts 0-31:port0 32-63:port1
6590 * multicast 64-127:port0 128-191:port1
6591 */
8d9c5f34 6592 config->hdr.length = 2;
af246401 6593 config->hdr.offset = port ? 32 : 0;
0626b899 6594 config->hdr.client_id = bp->fp->cl_id;
a2fbb9ea
ET
6595 config->hdr.reserved1 = 0;
6596
6597 /* primary MAC */
6598 config->config_table[0].cam_entry.msb_mac_addr =
6599 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6600 config->config_table[0].cam_entry.middle_mac_addr =
6601 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6602 config->config_table[0].cam_entry.lsb_mac_addr =
6603 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6604 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6605 if (set)
6606 config->config_table[0].target_table_entry.flags = 0;
6607 else
6608 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6609 config->config_table[0].target_table_entry.client_id = 0;
6610 config->config_table[0].target_table_entry.vlan_id = 0;
6611
3101c2bc
YG
6612 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6613 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6614 config->config_table[0].cam_entry.msb_mac_addr,
6615 config->config_table[0].cam_entry.middle_mac_addr,
6616 config->config_table[0].cam_entry.lsb_mac_addr);
6617
6618 /* broadcast */
4781bfad
EG
6619 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6620 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6621 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6622 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6623 if (set)
6624 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6625 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6626 else
6627 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6628 config->config_table[1].target_table_entry.client_id = 0;
6629 config->config_table[1].target_table_entry.vlan_id = 0;
6630
6631 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6632 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6633 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6634}
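/*
 * Worked example (editorial note): the CAM takes the MAC as three
 * big-endian 16-bit words.  For dev_addr 00:11:22:33:44:55 on a
 * little-endian host, *(u16 *)&dev_addr[0] reads as 0x1100 and
 * swab16(0x1100) = 0x0011, so msb/middle/lsb become 0x0011, 0x2233
 * and 0x4455.
 */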

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
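
/*
 * Editor's usage sketch (illustration only): every caller follows the
 * same post-then-wait pattern, e.g. halting a client as done in
 * bnx2x_stop_multi() below:
 *
 *	fp->state = BNX2X_FP_STATE_HALTING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
 *			       &(fp->state), 1);
 *
 * A 0 return means bnx2x_sp_event() observed the completion; -EBUSY
 * means the ~5 second budget expired.
 */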

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
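
/*
 * Editor's example: with multi_mode == ETH_RSS_MODE_REGULAR on an
 * 8-CPU host and BNX2X_MAX_QUEUES(bp) of, say, 16 (hypothetical
 * value), the MSI-X branch above requests min(8, 16) = 8 rx and 8 tx
 * queues; if bnx2x_enable_msix() fails, both counts fall back to 1
 * and the driver continues with MSI or legacy INTx.
 */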

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP  load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP  new load counts       %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queues should only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}
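
/*
 * Editor's worked example of the no-MCP load_count[] bookkeeping in
 * bnx2x_nic_load() above (load_count is {common, port0, port1}):
 * first load on port 0 leaves {1, 1, 0} -> LOAD_COMMON; a following
 * load on port 1 leaves {2, 1, 1} -> LOAD_PORT; any further load on
 * an already-loaded port -> LOAD_FUNCTION.  bnx2x_nic_unload()
 * decrements the same counters to pick the matching UNLOAD_* code.
 */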

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
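
/*
 * Editor's note: the three unload codes nest in scope -- UNLOAD_COMMON
 * resets function + port + common blocks, UNLOAD_PORT resets
 * function + port, and UNLOAD_FUNCTION resets only the function.
 */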

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_mb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_mb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP  load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP  new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
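
/*
 * Editor's example for the WoL branch in bnx2x_nic_unload() above:
 * each EMAC MAC_MATCH entry is 8 bytes, so E1HVN 0 writes entry 1
 * (byte offset 8), keeping entry 0 for the PMF.  For a MAC of
 * 00:11:22:33:44:55 the first register gets 0x00000011 and the
 * second 0x22334455.
 */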

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
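
/*
 * Editor's example of the chip_id packing above: a hypothetical
 * chip_id of 0x164e1014 decodes as num 0x164e (bits 16-31), rev 0x1
 * (bits 12-15), metal 0x01 (bits 4-11) and bond_id 0x4 (bits 0-3).
 */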

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
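
/*
 * Editor's example: if NVRAM sets only the 1G and 10G bits in
 * speed_cap_mask, the masking at the end of
 * bnx2x_link_settings_supported() strips the 10/100M and 2.5G
 * SUPPORTED_* bits, leaving 1000baseT_Full and 10000baseT_Full plus
 * whatever media/pause bits the PHY switch added.
 */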

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
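
/*
 * Editor's example of the shmem MAC unpacking above: mac_upper 0x0011
 * and mac_lower 0x22334455 yield dev_addr 00:11:22:33:44:55 -- the
 * upper halfword supplies bytes 0-1 and the lower word bytes 2-5,
 * most significant byte first.
 */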

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
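
/*
 * Editor's note: on real silicon CHIP_REV_IS_SLOW() is false, so
 * timer_interval above is HZ and, with the "poll" module parameter
 * left at 0, the periodic timer fires once per second; poll=N
 * shortens the period to N jiffies for debugging.
 */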

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
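
/*
 * Editor's example for the E1HMF cap in bnx2x_get_settings() above:
 * if the MAX_BW field of mf_config extracts to 25 (hypothetical),
 * vn_max_rate is 25 * 100 = 2500, so a 10000 Mb/s link is reported
 * to ethtool as 2500 Mb/s for this function.
 */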

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8443
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
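
/* Note on the cpu_to_be32() above (illustrative): on a little-endian CPU a
 * dword register read of 0x12345678 is stored into the __be32 result as the
 * byte sequence 12 34 56 78, which is exactly the order ethtool hands to
 * user space.
 */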

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
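
/* Example (illustrative): for offset 0x103, which lives in the aligned dword
 * at 0x100, BYTE_OFFSET(0x103) = 8 * 3 = 24, so the mask (0xff << 24)
 * selects that byte's lane within the 32-bit word -- which is how
 * bnx2x_nvram_write1() below patches a single byte.
 */
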
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
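	/* Descriptive note: writes are issued in page-sized bursts -- LAST is
	 * set on the final dword of the buffer or of an NVRAM page, and FIRST
	 * is re-asserted on the opening dword of the next page (the page size
	 * comes from NVRAM_PAGE_SIZE).
	 */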
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

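	/* Sanity bounds (descriptive): the Tx ring must at least fit one
	 * maximally-fragmented packet, i.e. MAX_SKB_FRAGS frag BDs plus a
	 * few BDs of slack for the header/parsing BDs -- hence the "+ 4".
	 */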
	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};
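
	/* Each reg_tbl[] entry is { offset0, per-port stride, RW mask }:
	 * the register exercised is offset0 + port * stride, and only the
	 * bits in mask are compared after the write/read-back cycle.
	 */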

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the read-back value matches what was written
			   (within the writable mask) */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3
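
/* Why a fixed residual works (descriptive): each nvram_tbl[] region below
 * carries its CRC32 in its last dword, and running a CRC over data plus its
 * appended CRC yields a constant remainder -- 0xdebb20e3 for the reflected
 * CRC-32 used by ether_crc_le() -- so bnx2x_test_nvram() checks that
 * constant instead of recomputing and comparing per-region checksums.
 */
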
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

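/* Rationale (descriptive): in E1H multi-function mode the port-wide MAC
 * counters aggregate traffic of all functions sharing the port, so they are
 * hidden from a single function's ethtool output unless BNX2X_MSG_STATS
 * debugging is turned on.
 */
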
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
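		/* 3 is the D3hot encoding of the PowerState field in the
		 * PCI PM control/status register */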
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
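	/* The last descriptor slot of every RCQ page is a "next page"
	 * pointer rather than a completion, so if the consumer index from
	 * the status block lands on it, step over it before comparing
	 * (descriptive; matches the NEXT_RCQ_IDX() ring layout).
	 */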
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
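
/* Behavioral note (descriptive): "fix" is the signed byte skew between where
 * the checksum was started and the transport header; the helper folds the
 * over- or under-counted bytes out of the ones-complement sum and returns
 * the corrected value byte-swapped for the parsing BD.
 */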

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
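
/* Example (illustrative): a CHECKSUM_PARTIAL IPv4 TCP skb with SKB_GSO_TCPV4
 * set maps to (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4).
 */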

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
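/* Sketch of the check below (descriptive, following the in-line comments):
 * the FW can fetch a window of (MAX_FETCH_BD - 3) data BDs per LSO segment
 * (3 BDs are reserved for the headers BD, the PBD and the last BD), so the
 * function slides that window across the frags and requests linearization
 * whenever a window sums to less than one MSS.
 */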
755735eb
EG
10158/* check if packet requires linearization (packet is too fragmented) */
10159static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10160 u32 xmit_type)
10161{
10162 int to_copy = 0;
10163 int hlen = 0;
10164 int first_bd_sz = 0;
10165
10166 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10167 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10168
10169 if (xmit_type & XMIT_GSO) {
10170 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10171 /* Check if LSO packet needs to be copied:
10172 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10173 int wnd_size = MAX_FETCH_BD - 3;
33471629 10174 /* Number of windows to check */
755735eb
EG
10175 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10176 int wnd_idx = 0;
10177 int frag_idx = 0;
10178 u32 wnd_sum = 0;
10179
10180 /* Headers length */
10181 hlen = (int)(skb_transport_header(skb) - skb->data) +
10182 tcp_hdrlen(skb);
10183
10184 /* Amount of data (w/o headers) on linear part of SKB*/
10185 first_bd_sz = skb_headlen(skb) - hlen;
10186
10187 wnd_sum = first_bd_sz;
10188
10189 /* Calculate the first sum - it's special */
10190 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10191 wnd_sum +=
10192 skb_shinfo(skb)->frags[frag_idx].size;
10193
10194 /* If there was data on linear skb data - check it */
10195 if (first_bd_sz > 0) {
10196 if (unlikely(wnd_sum < lso_mss)) {
10197 to_copy = 1;
10198 goto exit_lbl;
10199 }
10200
10201 wnd_sum -= first_bd_sz;
10202 }
10203
10204 /* Others are easier: run through the frag list and
10205 check all windows */
10206 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10207 wnd_sum +=
10208 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10209
10210 if (unlikely(wnd_sum < lso_mss)) {
10211 to_copy = 1;
10212 break;
10213 }
10214 wnd_sum -=
10215 skb_shinfo(skb)->frags[wnd_idx].size;
10216 }
10217
10218 } else {
10219 /* in non-LSO too fragmented packet should always
10220 be linearized */
10221 to_copy = 1;
10222 }
10223 }
10224
10225exit_lbl:
10226 if (unlikely(to_copy))
10227 DP(NETIF_MSG_TX_QUEUED,
10228 "Linearization IS REQUIRED for %s packet. "
10229 "num_frags %d hlen %d first_bd_sz %d\n",
10230 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10231 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10232
10233 return to_copy;
10234}
632da4d6 10235#endif
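
/* Illustrative note (not in the original driver): the FW can fetch at
 * most MAX_FETCH_BD BDs per packet, so every window of
 * (MAX_FETCH_BD - 3) consecutive data BDs must carry at least one MSS
 * worth of payload. E.g. with a 10-BD window and lso_mss == 1460, ten
 * consecutive frags summing to only 1000 bytes would force
 * skb_linearize() in bnx2x_start_xmit() below.
 */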

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
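	/* Illustrative note (not in the original driver): the resulting
	 * BD chain for a typical offloaded packet looks like
	 *
	 *   [start BD: linear data] -> [parsing BD (pbd)]
	 *       -> [data BD] ... [data BD, marked last]
	 *
	 * so nbd below is nr_frags plus 1 (no pbd) or 2 (start BD +
	 * pbd), and a TSO header split (bnx2x_tx_split) adds one more.
	 */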

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
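	/* Illustrative note (not in the original driver): for a 64KB
	 * IPv4 TSO send with gso_size 1460, the pbd now carries
	 * lso_mss = 1460, the starting TCP sequence number, the IP id
	 * to replicate per segment, and a pseudo-header checksum
	 * computed without the length field; the FW fills the length
	 * in per generated segment (hence PSEUDO_CS_WITHOUT_LEN).
	 */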

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
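	/* Illustrative note (not in the original driver): the last BD
	 * of each descriptor page links to the next page rather than
	 * holding data, so if the producer wrapped past such a link BD
	 * within this packet's span, it is counted in nbd as well.
	 */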

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
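
/* Illustrative note (not in the original driver): the E1H multicast
 * path above is a 256-bucket hash spread over MC_HASH_SIZE 32-bit
 * registers. E.g. if crc32c_le() over a MAC yields a top byte of
 * 0x5a (90), then regidx = 90 >> 5 = 2 and bit = 90 & 0x1f = 26, so
 * bit 26 of the third hash register is set.
 */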

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
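
/* Illustrative note (not in the original driver): these two helpers
 * feed the probe banner in bnx2x_init_one() below; e.g. a x8 Gen2 link
 * would be reported as width 8 and speed 2, printed as
 * "PCI-E x8 5GHz (Gen2)".
 */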

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);