/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

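/* Indirect register access: the GRC window is steered by writing the
 * target address to PCICFG_GRC_ADDRESS in PCI config space, after
 * which accesses to PCICFG_GRC_DATA hit that address.  The window is
 * parked back at PCICFG_VENDOR_ID_OFFSET afterwards so that stray
 * config cycles cannot touch device registers.
 */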
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

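/* bnx2x_write_dmae() below copies a buffer from host memory into GRC
 * space using the DMA engine: the command descriptor is posted through
 * bnx2x_post_dmae() and completion is detected by polling the wb_comp
 * word in the slowpath area, which the engine sets to DMAE_COMP_VAL.
 * If the engine is not yet usable (!bp->dmae_ready), the copy falls
 * back to indirect register writes.
 */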
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

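/* The read path below mirrors bnx2x_write_dmae(): the opcode swaps
 * DMAE_CMD_SRC_PCI/DMAE_CMD_DST_GRC for DMAE_CMD_SRC_GRC/
 * DMAE_CMD_DST_PCI, and the destination is the slowpath wb_data
 * scratch area rather than a caller-supplied DMA address.
 */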
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

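/* bnx2x_mc_assert() walks the assert list of each of the four STORM
 * microcontrollers (XSTORM, TSTORM, CSTORM, USTORM).  Each list entry
 * is four consecutive 32-bit words; scanning stops at the first entry
 * whose first word still holds COMMON_ASM_INVALID_ASSERT_OPCODE, and
 * the return value is the number of asserts found.
 */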
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

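/* bnx2x_fw_dump() prints the firmware trace from the MCP scratchpad.
 * The word at offset 0xf104 is the current write mark (an MCP-view
 * address, hence the 0x08000000 rebase below); the buffer is dumped
 * from the mark to the end of the region and then from its start back
 * up to the mark, so the older text comes out first.
 */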
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

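/* bnx2x_int_enable() programs the HC (host coalescing) block for the
 * interrupt mode in use.  The three cases below differ only in which
 * HC_CONFIG_0 enable bits are set: MSI-X clears the single-ISR and
 * INTx line enables, MSI clears only the INTx line enable, and the
 * INTx case enables everything, performing an intermediate write with
 * MSI/MSI-X still enabled before dropping that bit.
 */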
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

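/* bnx2x_int_disable_sync() quiesces interrupt processing in three
 * steps: bp->intr_sem is raised first so that any ISR which still
 * fires becomes a no-op, the HC is then optionally told to stop
 * generating interrupts, and finally synchronize_irq() plus a
 * workqueue flush guarantee that no handler or slowpath task is left
 * running when the function returns.
 */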
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

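/* bnx2x_ack_sb() acknowledges a status block to the IGU: the 32-bit
 * igu_ack_register packs the new status-block index together with the
 * storm ID, the interrupt op (enable/disable) and an update flag, and
 * is written to the per-port INT_ACK command register in one shot.
 */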
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

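/* Tx availability math used below: "used" is the BD distance from
 * consumer to producer plus NUM_TX_RINGS, one extra slot per ring
 * page, since the last ("next page") BD of every page only chains to
 * the following page and can never carry data.
 */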
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

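/* SGE mask bookkeeping for the helpers below: fp->sge_mask is a bitmap
 * with one bit per SGE entry, kept in 64-bit elements.  A set bit means
 * the entry still holds a page the firmware may use; bits are cleared
 * as the firmware consumes entries, and the producer is only advanced
 * over mask elements that have gone completely to zero, refilling them
 * to all ones as it passes.
 */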
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

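/* TPA (LRO) bins: each fastpath keeps a small pool of skbs, one per
 * aggregation queue.  bnx2x_tpa_start() parks the partially-filled skb
 * from the BD ring in the bin and hands the bin's empty skb to the
 * ring in its place; bnx2x_tpa_stop() later completes the aggregation
 * by attaching the SGE pages as fragments and passing the skb up.
 */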
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

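/* bnx2x_rx_int() is the NAPI Rx loop: it walks the completion queue
 * from the software consumer up to the status-block consumer,
 * dispatching slowpath CQEs to bnx2x_sp_event(), TPA start/end CQEs to
 * the TPA helpers, and plain packets to the stack, copying small
 * frames into a fresh skb when the MTU exceeds ETH_MAX_PACKET_SIZE so
 * that the large buffer can be recycled in place.
 */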
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

4a37fb66 1763static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1764{
1765 u32 lock_status;
1766 u32 resource_bit = (1 << resource);
4a37fb66
YG
1767 int func = BP_FUNC(bp);
1768 u32 hw_lock_control_reg;
c18487ee 1769 int cnt;
a2fbb9ea 1770
c18487ee
YR
1771 /* Validating that the resource is within range */
1772 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1773 DP(NETIF_MSG_HW,
1774 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1775 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1776 return -EINVAL;
1777 }
a2fbb9ea 1778
4a37fb66
YG
1779 if (func <= 5) {
1780 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1781 } else {
1782 hw_lock_control_reg =
1783 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1784 }
1785
c18487ee 1786 /* Validating that the resource is not already taken */
4a37fb66 1787 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1788 if (lock_status & resource_bit) {
1789 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1790 lock_status, resource_bit);
1791 return -EEXIST;
1792 }
a2fbb9ea 1793
46230476
EG
1794 /* Try for 5 second every 5ms */
1795 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1796 /* Try to acquire the lock */
4a37fb66
YG
1797 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1798 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1799 if (lock_status & resource_bit)
1800 return 0;
a2fbb9ea 1801
c18487ee 1802 msleep(5);
a2fbb9ea 1803 }
c18487ee
YR
1804 DP(NETIF_MSG_HW, "Timeout\n");
1805 return -EAGAIN;
1806}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

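/*
 * Illustrative sketch, not part of the original driver: how a caller is
 * expected to bracket access to a shared resource with the acquire/release
 * pair above.  The function name is hypothetical and the body is only an
 * example of the usage pattern.
 */
static int bnx2x_example_with_mdio_lock(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	if (rc != 0)
		return rc;	/* -EINVAL, -EEXIST or -EAGAIN from above */

	/* ... access the shared MDIO bus here ... */

	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
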
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

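/*
 * Worked example for the shift computation above (illustrative only; the
 * helper name is hypothetical): with port swap active, a request for GPIO 2
 * on port 0 resolves to the other port's bank, i.e. gpio_shift becomes
 * 2 + MISC_REGISTERS_GPIO_PORT_SHIFT; without swap it stays at bit 2.
 */
static inline int bnx2x_example_gpio_shift(int gpio_num, int swapped_port)
{
	return gpio_num +
		(swapped_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
}
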
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
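
/*
 * Summary of the mapping above (informational note, not in the original
 * source): the resolved IEEE pause advertisement translates to ethtool
 * advertising flags as
 *
 *	PAUSE_NONE       -> neither Pause nor Asym_Pause
 *	PAUSE_BOTH       -> Pause | Asym_Pause
 *	PAUSE_ASYMMETRIC -> Asym_Pause only
 *	anything else    -> both flags cleared (same as PAUSE_NONE)
 */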

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
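
/*
 * Worked example for the quota arithmetic above (illustrative only, with
 * assumed numbers; the helper name is hypothetical): on a 10000 Mbps link,
 * r_param = 10000/8 = 1250 bytes/usec.  If RS_PERIODIC_TIMEOUT_USEC were
 * 400, a VN with vn_max_rate = 2500 Mbps would get a quota of
 * 2500 * 400 / 8 = 125000 bytes per period - exactly 25% of the 500000
 * bytes the port itself can move in that window.
 */
static inline u32 bnx2x_example_rs_quota(u16 vn_max_rate_mbps, u32 period_usec)
{
	/* Mbps * usec / 8 -> bytes transmitted in one period */
	return (vn_max_rate_mbps * period_usec) / 8;
}
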

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
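
/*
 * Illustrative sketch, not part of the original driver: the producer-side
 * ring advance used in bnx2x_sp_post() above.  When the producer BD reaches
 * the last BD the ring wraps to its base and the producer index restarts at
 * zero; otherwise both simply advance by one.  The helper name is
 * hypothetical.
 */
static inline void bnx2x_example_spq_advance(struct bnx2x *bp)
{
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;	/* wrap to ring base */
		bp->spq_prod_idx = 0;
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
}
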

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
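
/*
 * Informational note (not in the original source): the return value of
 * bnx2x_update_dsb_idx() is a bitmask of which default status block
 * sections advanced since the last poll:
 *
 *	bit 0 (0x01) - attention bits index
 *	bit 1 (0x02) - CSTORM index
 *	bit 2 (0x04) - USTORM index
 *	bit 3 (0x08) - XSTORM index
 *	bit 4 (0x10) - TSTORM index
 *
 * bnx2x_sp_task() below acts on bit 0 explicitly and re-acks all sections.
 */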

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
						DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
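
/*
 * Informational note (not in the original source): per attention bit, the
 * (attn_bits, attn_ack, attn_state) triple above decodes as
 *
 *	bit=1, ack=0, state=0  ->  newly asserted   (handle assertion)
 *	bit=0, ack=1, state=1  ->  newly deasserted (handle deassertion)
 *	bit == ack == state    ->  steady state, nothing to do
 *
 * Any combination where bit == ack but bit != state is inconsistent, which
 * is exactly what the "BAD attention state" check flags.
 */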

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
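
/*
 * Worked example for the split-64-bit helpers above (illustrative only;
 * the helper name is hypothetical): with s = 0x00000000:ffffffff and
 * a = 0x00000000:00000001, ADD_64 first computes s_lo = 0xffffffff + 1 = 0
 * (mod 2^32); since the new s_lo is less than a_lo, the wrap is detected
 * and a carry of 1 is added to s_hi, giving 0x00000001:00000000.
 */
static inline void bnx2x_example_add64(u32 *hi, u32 *lo, u32 a_hi, u32 a_lo)
{
	u32 s_hi = *hi, s_lo = *lo;

	ADD_64(s_hi, a_hi, s_lo, a_lo);	/* carry handled by the macro */
	*hi = s_hi;
	*lo = s_lo;
}
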

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
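
/*
 * Informational note (not in the original source): the port statistics
 * block is larger than a single DMAE read can move, so the function above
 * splits it into two commands.  The first copies DMAE_LEN32_RD_MAX dwords
 * and completes to GRC so the loader can chain; the second copies the
 * remainder, (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX
 * dwords starting DMAE_LEN32_RD_MAX * 4 bytes into the buffer, and signals
 * completion to host memory so bnx2x_stats_comp() can poll for
 * DMAE_COMP_VAL.
 */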
3273
3274static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3275{
3276 struct dmae_command *dmae;
34f80b04 3277 int port = BP_PORT(bp);
bb2a0f7a 3278 int vn = BP_E1HVN(bp);
a2fbb9ea 3279 u32 opcode;
bb2a0f7a 3280 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3281 u32 mac_addr;
bb2a0f7a
YG
3282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3283
3284 /* sanity */
3285 if (!bp->link_vars.link_up || !bp->port.pmf) {
3286 BNX2X_ERR("BUG!\n");
3287 return;
3288 }
a2fbb9ea
ET
3289
3290 bp->executer_idx = 0;
bb2a0f7a
YG
3291
3292 /* MCP */
3293 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3294 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3296#ifdef __BIG_ENDIAN
bb2a0f7a 3297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3298#else
bb2a0f7a 3299 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3300#endif
bb2a0f7a
YG
3301 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3302 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3303
bb2a0f7a 3304 if (bp->port.port_stx) {
a2fbb9ea
ET
3305
3306 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3307 dmae->opcode = opcode;
bb2a0f7a
YG
3308 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3309 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3310 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3311 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3312 dmae->len = sizeof(struct host_port_stats) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3315 dmae->comp_val = 1;
a2fbb9ea
ET
3316 }
3317
bb2a0f7a
YG
3318 if (bp->func_stx) {
3319
3320 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321 dmae->opcode = opcode;
3322 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3323 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3324 dmae->dst_addr_lo = bp->func_stx >> 2;
3325 dmae->dst_addr_hi = 0;
3326 dmae->len = sizeof(struct host_func_stats) >> 2;
3327 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328 dmae->comp_addr_hi = 0;
3329 dmae->comp_val = 1;
a2fbb9ea
ET
3330 }
3331
bb2a0f7a 3332 /* MAC */
a2fbb9ea
ET
3333 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3334 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3336#ifdef __BIG_ENDIAN
3337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3338#else
3339 DMAE_CMD_ENDIANITY_DW_SWAP |
3340#endif
bb2a0f7a
YG
3341 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3343
c18487ee 3344 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3345
3346 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3347 NIG_REG_INGRESS_BMAC0_MEM);
3348
3349 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3350 BIGMAC_REGISTER_TX_STAT_GTBYT */
3351 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3352 dmae->opcode = opcode;
3353 dmae->src_addr_lo = (mac_addr +
3354 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3355 dmae->src_addr_hi = 0;
3356 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3357 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3358 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3359 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3361 dmae->comp_addr_hi = 0;
3362 dmae->comp_val = 1;
3363
3364 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3365 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (mac_addr +
3369 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3372 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3374 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3375 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3376 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3379 dmae->comp_val = 1;
3380
c18487ee 3381 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3382
3383 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3384
3385 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3386 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3387 dmae->opcode = opcode;
3388 dmae->src_addr_lo = (mac_addr +
3389 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3390 dmae->src_addr_hi = 0;
3391 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3392 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3393 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3394 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395 dmae->comp_addr_hi = 0;
3396 dmae->comp_val = 1;
3397
3398 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3399 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400 dmae->opcode = opcode;
3401 dmae->src_addr_lo = (mac_addr +
3402 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3403 dmae->src_addr_hi = 0;
3404 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3405 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3406 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3407 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3408 dmae->len = 1;
3409 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3410 dmae->comp_addr_hi = 0;
3411 dmae->comp_val = 1;
3412
3413 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3414 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3415 dmae->opcode = opcode;
3416 dmae->src_addr_lo = (mac_addr +
3417 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3418 dmae->src_addr_hi = 0;
3419 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3420 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3421 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3422 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3423 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3424 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3425 dmae->comp_addr_hi = 0;
3426 dmae->comp_val = 1;
3427 }
3428
3429 /* NIG */
bb2a0f7a
YG
3430 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3431 dmae->opcode = opcode;
3432 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3433 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3434 dmae->src_addr_hi = 0;
3435 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3436 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3437 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3438 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3439 dmae->comp_addr_hi = 0;
3440 dmae->comp_val = 1;
3441
3442 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3443 dmae->opcode = opcode;
3444 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3445 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3446 dmae->src_addr_hi = 0;
3447 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3448 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3449 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3450 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3451 dmae->len = (2*sizeof(u32)) >> 2;
3452 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3453 dmae->comp_addr_hi = 0;
3454 dmae->comp_val = 1;
3455
3456 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3457 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3458 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3459 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3460#ifdef __BIG_ENDIAN
3461 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3462#else
3463 DMAE_CMD_ENDIANITY_DW_SWAP |
3464#endif
3465 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3466 (vn << DMAE_CMD_E1HVN_SHIFT));
3467 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3468 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3469 dmae->src_addr_hi = 0;
3470 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3471 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3472 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3473 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3474 dmae->len = (2*sizeof(u32)) >> 2;
3475 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3476 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3477 dmae->comp_val = DMAE_COMP_VAL;
3478
3479 *stats_comp = 0;
3480}
3481
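/* Set up the single DMAE command that copies the host_func_stats block
 * from host memory into the per-function statistics area (func_stx) in
 * device memory; completion is signalled by DMAE_COMP_VAL landing in
 * stats_comp, which bnx2x_stats_comp() polls for.
 */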
bb2a0f7a 3482static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3483{
3484 struct dmae_command *dmae = &bp->stats_dmae;
3485 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3486
3487 /* sanity */
3488 if (!bp->func_stx) {
3489 BNX2X_ERR("BUG!\n");
3490 return;
3491 }
a2fbb9ea 3492
3493 bp->executer_idx = 0;
3494 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3495
3496 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3497 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499#ifdef __BIG_ENDIAN
3500 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501#else
3502 DMAE_CMD_ENDIANITY_DW_SWAP |
3503#endif
3504 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3506 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3507 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3508 dmae->dst_addr_lo = bp->func_stx >> 2;
3509 dmae->dst_addr_hi = 0;
3510 dmae->len = sizeof(struct host_func_stats) >> 2;
3511 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3512 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3513 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3514
3515 *stats_comp = 0;
3516}
a2fbb9ea 3517
3518static void bnx2x_stats_start(struct bnx2x *bp)
3519{
3520 if (bp->port.pmf)
3521 bnx2x_port_stats_init(bp);
3522
3523 else if (bp->func_stx)
3524 bnx2x_func_stats_init(bp);
3525
3526 bnx2x_hw_stats_post(bp);
3527 bnx2x_storm_stats_post(bp);
3528}
3529
3530static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3531{
3532 bnx2x_stats_comp(bp);
3533 bnx2x_stats_pmf_update(bp);
3534 bnx2x_stats_start(bp);
3535}
3536
3537static void bnx2x_stats_restart(struct bnx2x *bp)
3538{
3539 bnx2x_stats_comp(bp);
3540 bnx2x_stats_start(bp);
3541}
3542
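/* Fold the freshly DMAE'd BigMAC counters into the host_port_stats
 * mirror: UPDATE_STAT64 accumulates the hardware counter deltas into
 * the 64-bit hi/lo pairs kept in mac_stx[1], and the pause-frame
 * totals are then exported to the global eth_stats.
 */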
3543static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3544{
3545 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3546 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548 struct {
3549 u32 lo;
3550 u32 hi;
3551 } diff;
3552
3553 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3554 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3555 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3556 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3557 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3558 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3559 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3560 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3561 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3562 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3563 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3564 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3565 UPDATE_STAT64(tx_stat_gt127,
3566 tx_stat_etherstatspkts65octetsto127octets);
3567 UPDATE_STAT64(tx_stat_gt255,
3568 tx_stat_etherstatspkts128octetsto255octets);
3569 UPDATE_STAT64(tx_stat_gt511,
3570 tx_stat_etherstatspkts256octetsto511octets);
3571 UPDATE_STAT64(tx_stat_gt1023,
3572 tx_stat_etherstatspkts512octetsto1023octets);
3573 UPDATE_STAT64(tx_stat_gt1518,
3574 tx_stat_etherstatspkts1024octetsto1522octets);
3575 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3576 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3577 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3578 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3579 UPDATE_STAT64(tx_stat_gterr,
3580 tx_stat_dot3statsinternalmactransmiterrors);
3581 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3582
3583 estats->pause_frames_received_hi =
3584 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3585 estats->pause_frames_received_lo =
3586 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3587
3588 estats->pause_frames_sent_hi =
3589 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3590 estats->pause_frames_sent_lo =
3591 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3592}
3593
3594static void bnx2x_emac_stats_update(struct bnx2x *bp)
3595{
3596 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3597 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3598 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3599
3600 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3601 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3602 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3603 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3605 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3606 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3607 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3608 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3609 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3610 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3611 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3612 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3613 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3614 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3615 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3616 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3617 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3618 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3622 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3623 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3629 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3631
3632 estats->pause_frames_received_hi =
3633 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3634 estats->pause_frames_received_lo =
3635 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3636 ADD_64(estats->pause_frames_received_hi,
3637 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3638 estats->pause_frames_received_lo,
3639 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3640
3641 estats->pause_frames_sent_hi =
3642 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3643 estats->pause_frames_sent_lo =
3644 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3645 ADD_64(estats->pause_frames_sent_hi,
3646 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3647 estats->pause_frames_sent_lo,
3648 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3649}
3650
3651static int bnx2x_hw_stats_update(struct bnx2x *bp)
3652{
3653 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3654 struct nig_stats *old = &(bp->port.old_nig_stats);
3655 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3656 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3657 struct {
3658 u32 lo;
3659 u32 hi;
3660 } diff;
de832a55 3661 u32 nig_timer_max;
3662
3663 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3664 bnx2x_bmac_stats_update(bp);
3665
3666 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3667 bnx2x_emac_stats_update(bp);
3668
3669 else { /* unreached */
3670 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3671 return -1;
3672 }
a2fbb9ea 3673
3674 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3675 new->brb_discard - old->brb_discard);
3676 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3677 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3678
3679 UPDATE_STAT64_NIG(egress_mac_pkt0,
3680 etherstatspkts1024octetsto1522octets);
3681 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3682
bb2a0f7a 3683 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3684
3685 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3686 sizeof(struct mac_stx));
3687 estats->brb_drop_hi = pstats->brb_drop_hi;
3688 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3689
bb2a0f7a 3690 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3691
3692 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3693 if (nig_timer_max != estats->nig_timer_max) {
3694 estats->nig_timer_max = nig_timer_max;
3695 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3696 }
3697
bb2a0f7a 3698 return 0;
3699}
3700
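/* Merge the firmware (storm) per-client statistics into the per-queue,
 * per-function and global counters. A client's block is consumed only
 * if its stats_counter matches the cycle the driver expects; a stale
 * counter aborts the update so it can be retried on the next tick.
 */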
bb2a0f7a 3701static int bnx2x_storm_stats_update(struct bnx2x *bp)
3702{
3703 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3704 struct tstorm_per_port_stats *tport =
de832a55 3705 &stats->tstorm_common.port_statistics;
3706 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3707 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3708 int i;
3709
3710 memset(&(fstats->total_bytes_received_hi), 0,
3711 sizeof(struct host_func_stats) - 2*sizeof(u32));
3712 estats->error_bytes_received_hi = 0;
3713 estats->error_bytes_received_lo = 0;
3714 estats->etherstatsoverrsizepkts_hi = 0;
3715 estats->etherstatsoverrsizepkts_lo = 0;
3716 estats->no_buff_discard_hi = 0;
3717 estats->no_buff_discard_lo = 0;
a2fbb9ea 3718
3719 for_each_queue(bp, i) {
3720 struct bnx2x_fastpath *fp = &bp->fp[i];
3721 int cl_id = fp->cl_id;
3722 struct tstorm_per_client_stats *tclient =
3723 &stats->tstorm_common.client_statistics[cl_id];
3724 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3725 struct ustorm_per_client_stats *uclient =
3726 &stats->ustorm_common.client_statistics[cl_id];
3727 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3728 struct xstorm_per_client_stats *xclient =
3729 &stats->xstorm_common.client_statistics[cl_id];
3730 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3731 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3732 u32 diff;
3733
3734 /* are storm stats valid? */
3735 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3736 bp->stats_counter) {
3737 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3738 " xstorm counter (%d) != stats_counter (%d)\n",
3739 i, xclient->stats_counter, bp->stats_counter);
3740 return -1;
3741 }
3742 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3743 bp->stats_counter) {
3744 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3745 " tstorm counter (%d) != stats_counter (%d)\n",
3746 i, tclient->stats_counter, bp->stats_counter);
3747 return -2;
3748 }
3749 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3750 bp->stats_counter) {
3751 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3752 " ustorm counter (%d) != stats_counter (%d)\n",
3753 i, uclient->stats_counter, bp->stats_counter);
3754 return -4;
3755 }
a2fbb9ea 3756
3757 qstats->total_bytes_received_hi =
3758 qstats->valid_bytes_received_hi =
a2fbb9ea 3759 le32_to_cpu(tclient->total_rcv_bytes.hi);
3760 qstats->total_bytes_received_lo =
3761 qstats->valid_bytes_received_lo =
a2fbb9ea 3762 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3763
de832a55 3764 qstats->error_bytes_received_hi =
bb2a0f7a 3765 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3766 qstats->error_bytes_received_lo =
bb2a0f7a 3767 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3768
3769 ADD_64(qstats->total_bytes_received_hi,
3770 qstats->error_bytes_received_hi,
3771 qstats->total_bytes_received_lo,
3772 qstats->error_bytes_received_lo);
3773
3774 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3775 total_unicast_packets_received);
3776 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3777 total_multicast_packets_received);
3778 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3779 total_broadcast_packets_received);
3780 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3781 etherstatsoverrsizepkts);
3782 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3783
3784 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3785 total_unicast_packets_received);
3786 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3787 total_multicast_packets_received);
3788 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3789 total_broadcast_packets_received);
3790 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3791 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3792 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3793
3794 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3795 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3796 qstats->total_bytes_transmitted_lo =
3797 le32_to_cpu(xclient->total_sent_bytes.lo);
3798
3799 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3800 total_unicast_packets_transmitted);
3801 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3802 total_multicast_packets_transmitted);
3803 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3804 total_broadcast_packets_transmitted);
3805
3806 old_tclient->checksum_discard = tclient->checksum_discard;
3807 old_tclient->ttl0_discard = tclient->ttl0_discard;
3808
3809 ADD_64(fstats->total_bytes_received_hi,
3810 qstats->total_bytes_received_hi,
3811 fstats->total_bytes_received_lo,
3812 qstats->total_bytes_received_lo);
3813 ADD_64(fstats->total_bytes_transmitted_hi,
3814 qstats->total_bytes_transmitted_hi,
3815 fstats->total_bytes_transmitted_lo,
3816 qstats->total_bytes_transmitted_lo);
3817 ADD_64(fstats->total_unicast_packets_received_hi,
3818 qstats->total_unicast_packets_received_hi,
3819 fstats->total_unicast_packets_received_lo,
3820 qstats->total_unicast_packets_received_lo);
3821 ADD_64(fstats->total_multicast_packets_received_hi,
3822 qstats->total_multicast_packets_received_hi,
3823 fstats->total_multicast_packets_received_lo,
3824 qstats->total_multicast_packets_received_lo);
3825 ADD_64(fstats->total_broadcast_packets_received_hi,
3826 qstats->total_broadcast_packets_received_hi,
3827 fstats->total_broadcast_packets_received_lo,
3828 qstats->total_broadcast_packets_received_lo);
3829 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3830 qstats->total_unicast_packets_transmitted_hi,
3831 fstats->total_unicast_packets_transmitted_lo,
3832 qstats->total_unicast_packets_transmitted_lo);
3833 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3834 qstats->total_multicast_packets_transmitted_hi,
3835 fstats->total_multicast_packets_transmitted_lo,
3836 qstats->total_multicast_packets_transmitted_lo);
3837 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3838 qstats->total_broadcast_packets_transmitted_hi,
3839 fstats->total_broadcast_packets_transmitted_lo,
3840 qstats->total_broadcast_packets_transmitted_lo);
3841 ADD_64(fstats->valid_bytes_received_hi,
3842 qstats->valid_bytes_received_hi,
3843 fstats->valid_bytes_received_lo,
3844 qstats->valid_bytes_received_lo);
3845
3846 ADD_64(estats->error_bytes_received_hi,
3847 qstats->error_bytes_received_hi,
3848 estats->error_bytes_received_lo,
3849 qstats->error_bytes_received_lo);
3850 ADD_64(estats->etherstatsoverrsizepkts_hi,
3851 qstats->etherstatsoverrsizepkts_hi,
3852 estats->etherstatsoverrsizepkts_lo,
3853 qstats->etherstatsoverrsizepkts_lo);
3854 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3855 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3856 }
3857
3858 ADD_64(fstats->total_bytes_received_hi,
3859 estats->rx_stat_ifhcinbadoctets_hi,
3860 fstats->total_bytes_received_lo,
3861 estats->rx_stat_ifhcinbadoctets_lo);
3862
3863 memcpy(estats, &(fstats->total_bytes_received_hi),
3864 sizeof(struct host_func_stats) - 2*sizeof(u32));
3865
3866 ADD_64(estats->etherstatsoverrsizepkts_hi,
3867 estats->rx_stat_dot3statsframestoolong_hi,
3868 estats->etherstatsoverrsizepkts_lo,
3869 estats->rx_stat_dot3statsframestoolong_lo);
3870 ADD_64(estats->error_bytes_received_hi,
3871 estats->rx_stat_ifhcinbadoctets_hi,
3872 estats->error_bytes_received_lo,
3873 estats->rx_stat_ifhcinbadoctets_lo);
3874
3875 if (bp->port.pmf) {
3876 estats->mac_filter_discard =
3877 le32_to_cpu(tport->mac_filter_discard);
3878 estats->xxoverflow_discard =
3879 le32_to_cpu(tport->xxoverflow_discard);
3880 estats->brb_truncate_discard =
bb2a0f7a 3881 le32_to_cpu(tport->brb_truncate_discard);
3882 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3883 }
3884
3885 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3886
3887 bp->stats_pending = 0;
3888
3889 return 0;
3890}
3891
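/* Translate the driver's 64-bit hi/lo counters into the generic
 * net_device_stats layout; bnx2x_hilo() folds a {hi, lo} u32 pair
 * into a single scalar.
 */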
bb2a0f7a 3892static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3893{
bb2a0f7a 3894 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3895 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3896 int i;
3897
3898 nstats->rx_packets =
3899 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3900 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3901 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3902
3903 nstats->tx_packets =
3904 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3905 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3906 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3907
de832a55 3908 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3909
0e39e645 3910 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3911
3912 nstats->rx_dropped = estats->mac_discard;
3913 for_each_queue(bp, i)
3914 nstats->rx_dropped +=
3915 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3916
3917 nstats->tx_dropped = 0;
3918
3919 nstats->multicast =
de832a55 3920 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3921
bb2a0f7a 3922 nstats->collisions =
de832a55 3923 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3924
3925 nstats->rx_length_errors =
3926 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3927 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3928 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3929 bnx2x_hilo(&estats->brb_truncate_hi);
3930 nstats->rx_crc_errors =
3931 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3932 nstats->rx_frame_errors =
3933 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3934 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3935 nstats->rx_missed_errors = estats->xxoverflow_discard;
3936
3937 nstats->rx_errors = nstats->rx_length_errors +
3938 nstats->rx_over_errors +
3939 nstats->rx_crc_errors +
3940 nstats->rx_frame_errors +
3941 nstats->rx_fifo_errors +
3942 nstats->rx_missed_errors;
a2fbb9ea 3943
bb2a0f7a 3944 nstats->tx_aborted_errors =
3945 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3946 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3947 nstats->tx_carrier_errors =
3948 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3949 nstats->tx_fifo_errors = 0;
3950 nstats->tx_heartbeat_errors = 0;
3951 nstats->tx_window_errors = 0;
3952
3953 nstats->tx_errors = nstats->tx_aborted_errors +
3954 nstats->tx_carrier_errors +
3955 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3956}
3957
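/* Driver-level counters (Xoff events, skb allocation failures, HW
 * checksum errors) are kept per queue; sum them into the global
 * eth_stats.
 */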
3958static void bnx2x_drv_stats_update(struct bnx2x *bp)
3959{
3960 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3961 int i;
3962
3963 estats->driver_xoff = 0;
3964 estats->rx_err_discard_pkt = 0;
3965 estats->rx_skb_alloc_failed = 0;
3966 estats->hw_csum_err = 0;
3967 for_each_queue(bp, i) {
3968 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3969
3970 estats->driver_xoff += qstats->driver_xoff;
3971 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3972 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3973 estats->hw_csum_err += qstats->hw_csum_err;
3974 }
3975}
3976
bb2a0f7a 3977static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3978{
bb2a0f7a 3979 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3980
3981 if (*stats_comp != DMAE_COMP_VAL)
3982 return;
3983
3984 if (bp->port.pmf)
de832a55 3985 bnx2x_hw_stats_update(bp);
a2fbb9ea 3986
3987 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3988 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3989 bnx2x_panic();
3990 return;
3991 }
3992
3993 bnx2x_net_stats_update(bp);
3994 bnx2x_drv_stats_update(bp);
3995
a2fbb9ea 3996 if (bp->msglevel & NETIF_MSG_TIMER) {
3997 struct tstorm_per_client_stats *old_tclient =
3998 &bp->fp->old_tclient;
3999 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4000 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4001 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4002 int i;
4003
4004 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4005 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4006 " tx pkt (%lx)\n",
4007 bnx2x_tx_avail(bp->fp),
7a9b2557 4008 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4009 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4010 " rx pkt (%lx)\n",
4011 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4012 bp->fp->rx_comp_cons),
4013 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4014 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4015 "brb truncate %u\n",
4016 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4017 qstats->driver_xoff,
4018 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4019 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4020 "packets_too_big_discard %lu no_buff_discard %lu "
4021 "mac_discard %u mac_filter_discard %u "
4022 "xxoverflow_discard %u brb_truncate_discard %u "
4023 "ttl0_discard %u\n",
4781bfad 4024 le32_to_cpu(old_tclient->checksum_discard),
4025 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4026 bnx2x_hilo(&qstats->no_buff_discard_hi),
4027 estats->mac_discard, estats->mac_filter_discard,
4028 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4029 le32_to_cpu(old_tclient->ttl0_discard));
4030
4031 for_each_queue(bp, i) {
4032 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4033 bnx2x_fp(bp, i, tx_pkt),
4034 bnx2x_fp(bp, i, rx_pkt),
4035 bnx2x_fp(bp, i, rx_calls));
4036 }
4037 }
4038
4039 bnx2x_hw_stats_post(bp);
4040 bnx2x_storm_stats_post(bp);
4041}
a2fbb9ea 4042
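/* Flush a final statistics snapshot to device memory on stop: port
 * stats first (when this function is the PMF), then function stats,
 * with the two DMAE commands chained through the loader when both
 * areas are present.
 */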
4043static void bnx2x_port_stats_stop(struct bnx2x *bp)
4044{
4045 struct dmae_command *dmae;
4046 u32 opcode;
4047 int loader_idx = PMF_DMAE_C(bp);
4048 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4049
bb2a0f7a 4050 bp->executer_idx = 0;
a2fbb9ea 4051
4052 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4053 DMAE_CMD_C_ENABLE |
4054 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4055#ifdef __BIG_ENDIAN
bb2a0f7a 4056 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4057#else
bb2a0f7a 4058 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4059#endif
4060 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4061 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4062
4063 if (bp->port.port_stx) {
4064
4065 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066 if (bp->func_stx)
4067 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4068 else
4069 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4070 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4071 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4072 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4073 dmae->dst_addr_hi = 0;
4074 dmae->len = sizeof(struct host_port_stats) >> 2;
4075 if (bp->func_stx) {
4076 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4077 dmae->comp_addr_hi = 0;
4078 dmae->comp_val = 1;
4079 } else {
4080 dmae->comp_addr_lo =
4081 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4082 dmae->comp_addr_hi =
4083 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4084 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4085
4086 *stats_comp = 0;
4087 }
4088 }
4089
4090 if (bp->func_stx) {
4091
4092 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4093 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4094 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4095 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4096 dmae->dst_addr_lo = bp->func_stx >> 2;
4097 dmae->dst_addr_hi = 0;
4098 dmae->len = sizeof(struct host_func_stats) >> 2;
4099 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4100 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4101 dmae->comp_val = DMAE_COMP_VAL;
4102
4103 *stats_comp = 0;
a2fbb9ea 4104 }
4105}
4106
4107static void bnx2x_stats_stop(struct bnx2x *bp)
4108{
4109 int update = 0;
4110
4111 bnx2x_stats_comp(bp);
4112
4113 if (bp->port.pmf)
4114 update = (bnx2x_hw_stats_update(bp) == 0);
4115
4116 update |= (bnx2x_storm_stats_update(bp) == 0);
4117
4118 if (update) {
4119 bnx2x_net_stats_update(bp);
a2fbb9ea 4120
4121 if (bp->port.pmf)
4122 bnx2x_port_stats_stop(bp);
4123
4124 bnx2x_hw_stats_post(bp);
4125 bnx2x_stats_comp(bp);
4126 }
4127}
4128
4129static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4130{
4131}
4132
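/* Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the next state. For example, an
 * UPDATE event in the ENABLED state runs bnx2x_stats_update() and
 * stays in ENABLED, while a STOP event runs bnx2x_stats_stop() and
 * moves to DISABLED.
 */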
4133static const struct {
4134 void (*action)(struct bnx2x *bp);
4135 enum bnx2x_stats_state next_state;
4136} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4137/* state event */
4138{
4139/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4140/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4141/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4142/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4143},
4144{
4145/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4146/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4147/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4148/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4149}
4150};
4151
4152static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4153{
4154 enum bnx2x_stats_state state = bp->stats_state;
4155
4156 bnx2x_stats_stm[state][event].action(bp);
4157 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4158
4159 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4160 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4161 state, event, bp->stats_state);
4162}
4163
4164static void bnx2x_timer(unsigned long data)
4165{
4166 struct bnx2x *bp = (struct bnx2x *) data;
4167
4168 if (!netif_running(bp->dev))
4169 return;
4170
4171 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4172 goto timer_restart;
4173
4174 if (poll) {
4175 struct bnx2x_fastpath *fp = &bp->fp[0];
4176 int rc;
4177
4178 bnx2x_tx_int(fp, 1000);
4179 rc = bnx2x_rx_int(fp, 1000);
4180 }
4181
4182 if (!BP_NOMCP(bp)) {
4183 int func = BP_FUNC(bp);
4184 u32 drv_pulse;
4185 u32 mcp_pulse;
4186
4187 ++bp->fw_drv_pulse_wr_seq;
4188 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4189 /* TBD - add SYSTEM_TIME */
4190 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4191 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4192
34f80b04 4193 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4194 MCP_PULSE_SEQ_MASK);
4195 /* The delta between driver pulse and mcp response
4196 * should be 1 (before mcp response) or 0 (after mcp response)
4197 */
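/* Example, assuming an illustrative mask of 0x7fff: drv_pulse 0x0000
 * read right after mcp_pulse 0x7fff is still in sync, since the
 * comparison wraps modulo (MCP_PULSE_SEQ_MASK + 1).
 */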
4198 if ((drv_pulse != mcp_pulse) &&
4199 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4200 /* someone lost a heartbeat... */
4201 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4202 drv_pulse, mcp_pulse);
4203 }
4204 }
4205
4206 if ((bp->state == BNX2X_STATE_OPEN) ||
4207 (bp->state == BNX2X_STATE_DISABLED))
4208 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4209
f1410647 4210timer_restart:
4211 mod_timer(&bp->timer, jiffies + bp->current_interval);
4212}
4213
4214/* end of Statistics */
4215
4216/* nic init */
4217
4218/*
4219 * nic init service functions
4220 */
4221
34f80b04 4222static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4223{
4224 int port = BP_PORT(bp);
4225
4226 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4227 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4228 sizeof(struct ustorm_status_block)/4);
4229 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4230 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4231 sizeof(struct cstorm_status_block)/4);
4232}
4233
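/* Bind a fastpath status block to the hardware: program the USTORM and
 * CSTORM halves with their host DMA addresses and owning function, and
 * start with host coalescing disabled on every index until
 * bnx2x_update_coalesce() sets real timeouts.
 */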
4234static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4235 dma_addr_t mapping, int sb_id)
4236{
4237 int port = BP_PORT(bp);
bb2a0f7a 4238 int func = BP_FUNC(bp);
a2fbb9ea 4239 int index;
34f80b04 4240 u64 section;
4241
4242 /* USTORM */
4243 section = ((u64)mapping) + offsetof(struct host_status_block,
4244 u_status_block);
34f80b04 4245 sb->u_status_block.status_block_id = sb_id;
4246
4247 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4248 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4249 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4250 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4251 U64_HI(section));
4252 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4253 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4254
4255 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4256 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4257 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4258
4259 /* CSTORM */
4260 section = ((u64)mapping) + offsetof(struct host_status_block,
4261 c_status_block);
34f80b04 4262 sb->c_status_block.status_block_id = sb_id;
4263
4264 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4265 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4266 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4267 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4268 U64_HI(section));
4269 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4270 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4271
4272 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4273 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4274 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4275
4276 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4277}
4278
4279static void bnx2x_zero_def_sb(struct bnx2x *bp)
4280{
4281 int func = BP_FUNC(bp);
a2fbb9ea 4282
4283 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4284 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4285 sizeof(struct ustorm_def_status_block)/4);
4286 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4287 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4288 sizeof(struct cstorm_def_status_block)/4);
4289 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4290 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4291 sizeof(struct xstorm_def_status_block)/4);
4292 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4293 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4294 sizeof(struct tstorm_def_status_block)/4);
4295}
4296
4297static void bnx2x_init_def_sb(struct bnx2x *bp,
4298 struct host_def_status_block *def_sb,
34f80b04 4299 dma_addr_t mapping, int sb_id)
a2fbb9ea 4300{
4301 int port = BP_PORT(bp);
4302 int func = BP_FUNC(bp);
4303 int index, val, reg_offset;
4304 u64 section;
4305
4306 /* ATTN */
4307 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4308 atten_status_block);
34f80b04 4309 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4310
4311 bp->attn_state = 0;
4312
4313 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4314 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4315
34f80b04 4316 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4317 bp->attn_group[index].sig[0] = REG_RD(bp,
4318 reg_offset + 0x10*index);
4319 bp->attn_group[index].sig[1] = REG_RD(bp,
4320 reg_offset + 0x4 + 0x10*index);
4321 bp->attn_group[index].sig[2] = REG_RD(bp,
4322 reg_offset + 0x8 + 0x10*index);
4323 bp->attn_group[index].sig[3] = REG_RD(bp,
4324 reg_offset + 0xc + 0x10*index);
4325 }
4326
4327 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4328 HC_REG_ATTN_MSG0_ADDR_L);
4329
4330 REG_WR(bp, reg_offset, U64_LO(section));
4331 REG_WR(bp, reg_offset + 4, U64_HI(section));
4332
4333 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4334
4335 val = REG_RD(bp, reg_offset);
34f80b04 4336 val |= sb_id;
4337 REG_WR(bp, reg_offset, val);
4338
4339 /* USTORM */
4340 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4341 u_def_status_block);
34f80b04 4342 def_sb->u_def_status_block.status_block_id = sb_id;
4343
4344 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4345 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4346 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4347 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4348 U64_HI(section));
5c862848 4349 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4350 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4351
4352 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4353 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4354 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4355
4356 /* CSTORM */
4357 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4358 c_def_status_block);
34f80b04 4359 def_sb->c_def_status_block.status_block_id = sb_id;
4360
4361 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4362 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4363 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4364 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4365 U64_HI(section));
5c862848 4366 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4367 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4368
4369 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4370 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4371 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4372
4373 /* TSTORM */
4374 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4375 t_def_status_block);
34f80b04 4376 def_sb->t_def_status_block.status_block_id = sb_id;
4377
4378 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4379 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4380 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4381 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4382 U64_HI(section));
5c862848 4383 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4384 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4385
4386 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4387 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4388 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4389
4390 /* XSTORM */
4391 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4392 x_def_status_block);
34f80b04 4393 def_sb->x_def_status_block.status_block_id = sb_id;
4394
4395 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4396 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4397 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4398 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4399 U64_HI(section));
5c862848 4400 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4401 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4402
4403 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4404 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4405 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4406
bb2a0f7a 4407 bp->stats_pending = 0;
66e855f3 4408 bp->set_mac_pending = 0;
bb2a0f7a 4409
34f80b04 4410 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4411}
4412
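/* Program the status-block coalescing timeouts. The HC timeout fields
 * count in 12us units, hence the rx_ticks/12 and tx_ticks/12
 * conversions; a zero tick value disables coalescing on that index
 * instead (the HC_DISABLE flag is written as 1).
 */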
4413static void bnx2x_update_coalesce(struct bnx2x *bp)
4414{
34f80b04 4415 int port = BP_PORT(bp);
4416 int i;
4417
4418 for_each_queue(bp, i) {
34f80b04 4419 int sb_id = bp->fp[i].sb_id;
4420
4421 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4422 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4423 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4424 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4425 bp->rx_ticks/12);
a2fbb9ea 4426 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4427 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4428 U_SB_ETH_RX_CQ_INDEX),
4429 bp->rx_ticks ? 0 : 1);
4430
4431 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4432 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4433 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4434 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4435 bp->tx_ticks/12);
a2fbb9ea 4436 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4437 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4438 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4439 bp->tx_ticks ? 0 : 1);
4440 }
4441}
4442
4443static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4444 struct bnx2x_fastpath *fp, int last)
4445{
4446 int i;
4447
4448 for (i = 0; i < last; i++) {
4449 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4450 struct sk_buff *skb = rx_buf->skb;
4451
4452 if (skb == NULL) {
4453 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4454 continue;
4455 }
4456
4457 if (fp->tpa_state[i] == BNX2X_TPA_START)
4458 pci_unmap_single(bp->pdev,
4459 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4460 bp->rx_buf_size,
4461 PCI_DMA_FROMDEVICE);
4462
4463 dev_kfree_skb(skb);
4464 rx_buf->skb = NULL;
4465 }
4466}
4467
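/* Build the RX rings for every queue: allocate the TPA skb pool when
 * TPA is enabled, chain the "next page" elements of the SGE, BD and
 * CQ rings, then fill the rings with buffers. Allocation failures
 * shrink the ring or disable TPA on that queue instead of failing the
 * whole init.
 */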
4468static void bnx2x_init_rx_rings(struct bnx2x *bp)
4469{
7a9b2557 4470 int func = BP_FUNC(bp);
4471 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472 ETH_MAX_AGGREGATION_QUEUES_E1H;
4473 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4474 int i, j;
a2fbb9ea 4475
87942b46 4476 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4477 DP(NETIF_MSG_IFUP,
4478 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4479
7a9b2557 4480 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4481
555f6c78 4482 for_each_rx_queue(bp, j) {
32626230 4483 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4484
32626230 4485 for (i = 0; i < max_agg_queues; i++) {
4486 fp->tpa_pool[i].skb =
4487 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488 if (!fp->tpa_pool[i].skb) {
4489 BNX2X_ERR("Failed to allocate TPA "
4490 "skb pool for queue[%d] - "
4491 "disabling TPA on this "
4492 "queue!\n", j);
4493 bnx2x_free_tpa_pool(bp, fp, i);
4494 fp->disable_tpa = 1;
4495 break;
4496 }
4497 pci_unmap_addr_set(&fp->tpa_pool[i],
4498 mapping, 0);
4500 fp->tpa_state[i] = BNX2X_TPA_STOP;
4501 }
4502 }
4503 }
4504
555f6c78 4505 for_each_rx_queue(bp, j) {
4506 struct bnx2x_fastpath *fp = &bp->fp[j];
4507
4508 fp->rx_bd_cons = 0;
4509 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4511
4512 /* "next page" elements initialization */
4513 /* SGE ring */
4514 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515 struct eth_rx_sge *sge;
4516
4517 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4518 sge->addr_hi =
4519 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521 sge->addr_lo =
4522 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4524 }
4525
4526 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4527
7a9b2557 4528 /* RX BD ring */
4529 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530 struct eth_rx_bd *rx_bd;
4531
4532 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4533 rx_bd->addr_hi =
4534 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4535 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4536 rx_bd->addr_lo =
4537 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4538 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4539 }
4540
34f80b04 4541 /* CQ ring */
4542 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543 struct eth_rx_cqe_next_page *nextpg;
4544
4545 nextpg = (struct eth_rx_cqe_next_page *)
4546 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4547 nextpg->addr_hi =
4548 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4549 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4550 nextpg->addr_lo =
4551 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4552 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4553 }
4554
4555 /* Allocate SGEs and initialize the ring elements */
4556 for (i = 0, ring_prod = 0;
4557 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4558
4559 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560 BNX2X_ERR("was only able to allocate "
4561 "%d rx sges\n", i);
4562 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563 /* Cleanup already allocated elements */
4564 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4565 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566 fp->disable_tpa = 1;
4567 ring_prod = 0;
4568 break;
4569 }
4570 ring_prod = NEXT_SGE_IDX(ring_prod);
4571 }
4572 fp->rx_sge_prod = ring_prod;
4573
4574 /* Allocate BDs and initialize BD ring */
66e855f3 4575 fp->rx_comp_cons = 0;
7a9b2557 4576 cqe_ring_prod = ring_prod = 0;
4577 for (i = 0; i < bp->rx_ring_size; i++) {
4578 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579 BNX2X_ERR("was only able to allocate "
4580 "%d rx skbs on queue[%d]\n", i, j);
4581 fp->eth_q_stats.rx_skb_alloc_failed++;
4582 break;
4583 }
4584 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4585 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4586 WARN_ON(ring_prod <= i);
4587 }
4588
4589 fp->rx_bd_prod = ring_prod;
4590 /* must not have more available CQEs than BDs */
4591 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4592 cqe_ring_prod);
4593 fp->rx_pkt = fp->rx_calls = 0;
4594
4595 /* Warning!
4596 * This will generate an interrupt (to the TSTORM);
4597 * it must only be done after the chip is initialized.
4598 */
4599 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4600 fp->rx_sge_prod);
4601 if (j != 0)
4602 continue;
4603
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4605 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606 U64_LO(fp->rx_comp_mapping));
4607 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4608 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609 U64_HI(fp->rx_comp_mapping));
4610 }
4611}
4612
4613static void bnx2x_init_tx_ring(struct bnx2x *bp)
4614{
4615 int i, j;
4616
555f6c78 4617 for_each_tx_queue(bp, j) {
4618 struct bnx2x_fastpath *fp = &bp->fp[j];
4619
4620 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621 struct eth_tx_bd *tx_bd =
4622 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4623
4624 tx_bd->addr_hi =
4625 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4626 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4627 tx_bd->addr_lo =
4628 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4629 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4630 }
4631
4632 fp->tx_pkt_prod = 0;
4633 fp->tx_pkt_cons = 0;
4634 fp->tx_bd_prod = 0;
4635 fp->tx_bd_cons = 0;
4636 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4637 fp->tx_pkt = 0;
4638 }
4639}
4640
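/* Slow-path queue (SPQ) setup: reset the producer state and tell the
 * XSTORM the ring's page base and initial producer index through
 * fast-memory writes.
 */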
4641static void bnx2x_init_sp_ring(struct bnx2x *bp)
4642{
34f80b04 4643 int func = BP_FUNC(bp);
4644
4645 spin_lock_init(&bp->spq_lock);
4646
4647 bp->spq_left = MAX_SPQ_PENDING;
4648 bp->spq_prod_idx = 0;
4649 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650 bp->spq_prod_bd = bp->spq;
4651 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4652
34f80b04 4653 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4654 U64_LO(bp->spq_mapping));
4655 REG_WR(bp,
4656 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657 U64_HI(bp->spq_mapping));
4658
34f80b04 4659 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4660 bp->spq_prod_idx);
4661}
4662
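/* Fill the per-connection ETH context: the USTORM side gets the RX BD
 * and SGE page bases plus buffer sizes, the XSTORM side gets the TX BD
 * page base and doorbell data address, and the CSTORM side gets the TX
 * completion status-block binding.
 */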
4663static void bnx2x_init_context(struct bnx2x *bp)
4664{
4665 int i;
4666
4667 for_each_queue(bp, i) {
4668 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4670 u8 cl_id = fp->cl_id;
0626b899 4671 u8 sb_id = fp->sb_id;
a2fbb9ea 4672
4673 context->ustorm_st_context.common.sb_index_numbers =
4674 BNX2X_RX_SB_INDEX_NUM;
0626b899 4675 context->ustorm_st_context.common.clientId = cl_id;
4676 context->ustorm_st_context.common.status_block_id = sb_id;
4677 context->ustorm_st_context.common.flags =
4678 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680 context->ustorm_st_context.common.statistics_counter_id =
4681 cl_id;
8d9c5f34 4682 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4683 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4684 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4685 bp->rx_buf_size;
34f80b04 4686 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4687 U64_HI(fp->rx_desc_mapping);
34f80b04 4688 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4689 U64_LO(fp->rx_desc_mapping);
4690 if (!fp->disable_tpa) {
4691 context->ustorm_st_context.common.flags |=
4692 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694 context->ustorm_st_context.common.sge_buff_size =
4695 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4696 (u32)0xffff);
4697 context->ustorm_st_context.common.sge_page_base_hi =
4698 U64_HI(fp->rx_sge_mapping);
4699 context->ustorm_st_context.common.sge_page_base_lo =
4700 U64_LO(fp->rx_sge_mapping);
4701 }
4702
4703 context->ustorm_ag_context.cdu_usage =
4704 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705 CDU_REGION_NUMBER_UCM_AG,
4706 ETH_CONNECTION_TYPE);
4707
4708 context->xstorm_st_context.tx_bd_page_base_hi =
4709 U64_HI(fp->tx_desc_mapping);
4710 context->xstorm_st_context.tx_bd_page_base_lo =
4711 U64_LO(fp->tx_desc_mapping);
4712 context->xstorm_st_context.db_data_addr_hi =
4713 U64_HI(fp->tx_prods_mapping);
4714 context->xstorm_st_context.db_data_addr_lo =
4715 U64_LO(fp->tx_prods_mapping);
0626b899 4716 context->xstorm_st_context.statistics_data = (cl_id |
8d9c5f34 4717 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4718 context->cstorm_st_context.sb_index_number =
5c862848 4719 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4720 context->cstorm_st_context.status_block_id = sb_id;
4721
4722 context->xstorm_ag_context.cdu_reserved =
4723 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724 CDU_REGION_NUMBER_XCM_AG,
4725 ETH_CONNECTION_TYPE);
4726 }
4727}
4728
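/* RSS indirection table: spread the TSTORM table entries across the RX
 * queues round-robin, starting from the leading client id.
 */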
4729static void bnx2x_init_ind_table(struct bnx2x *bp)
4730{
26c8fa4d 4731 int func = BP_FUNC(bp);
4732 int i;
4733
555f6c78 4734 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4735 return;
4736
4737 DP(NETIF_MSG_IFUP,
4738 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4739 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4740 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4741 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4742 bp->fp->cl_id + (i % bp->num_rx_queues));
4743}
4744
4745static void bnx2x_set_client_config(struct bnx2x *bp)
4746{
49d66772 4747 struct tstorm_eth_client_config tstorm_client = {0};
4748 int port = BP_PORT(bp);
4749 int i;
49d66772 4750
e7799c5f 4751 tstorm_client.mtu = bp->dev->mtu;
49d66772 4752 tstorm_client.config_flags =
4753 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4755#ifdef BCM_VLAN
0c6671b0 4756 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4757 tstorm_client.config_flags |=
8d9c5f34 4758 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4760 }
4761#endif
49d66772 4762
4763 if (bp->flags & TPA_ENABLE_FLAG) {
4764 tstorm_client.max_sges_for_packet =
4f40f2cb 4765 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766 tstorm_client.max_sges_for_packet =
4767 ((tstorm_client.max_sges_for_packet +
4768 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769 PAGES_PER_SGE_SHIFT;
4770
4771 tstorm_client.config_flags |=
4772 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4773 }
4774
49d66772 4775 for_each_queue(bp, i) {
4776 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4777
49d66772 4778 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4779 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780 ((u32 *)&tstorm_client)[0]);
4781 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4782 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783 ((u32 *)&tstorm_client)[1]);
4784 }
4785
4786 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4788}
4789
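/* Translate bp->rx_mode into the per-function TSTORM drop/accept
 * masks: NONE drops all traffic, NORMAL accepts broadcast, ALLMULTI
 * also accepts all multicast, and PROMISC accepts everything.
 */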
4790static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4791{
a2fbb9ea 4792 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793 int mode = bp->rx_mode;
4794 int mask = (1 << BP_L_ID(bp));
4795 int func = BP_FUNC(bp);
4796 int i;
4797
3196a88a 4798 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4799
4800 switch (mode) {
4801 case BNX2X_RX_MODE_NONE: /* no Rx */
4802 tstorm_mac_filter.ucast_drop_all = mask;
4803 tstorm_mac_filter.mcast_drop_all = mask;
4804 tstorm_mac_filter.bcast_drop_all = mask;
4805 break;
4806 case BNX2X_RX_MODE_NORMAL:
34f80b04 4807 tstorm_mac_filter.bcast_accept_all = mask;
4808 break;
4809 case BNX2X_RX_MODE_ALLMULTI:
4810 tstorm_mac_filter.mcast_accept_all = mask;
4811 tstorm_mac_filter.bcast_accept_all = mask;
4812 break;
4813 case BNX2X_RX_MODE_PROMISC:
4814 tstorm_mac_filter.ucast_accept_all = mask;
4815 tstorm_mac_filter.mcast_accept_all = mask;
4816 tstorm_mac_filter.bcast_accept_all = mask;
4817 break;
4818 default:
4819 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4820 break;
4821 }
4822
4823 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4824 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4825 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4826 ((u32 *)&tstorm_mac_filter)[i]);
4827
34f80b04 4828 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4829 ((u32 *)&tstorm_mac_filter)[i]); */
4830 }
a2fbb9ea 4831
4832 if (mode != BNX2X_RX_MODE_NONE)
4833 bnx2x_set_client_config(bp);
4834}
4835
4836static void bnx2x_init_internal_common(struct bnx2x *bp)
4837{
4838 int i;
4839
4840 if (bp->flags & TPA_ENABLE_FLAG) {
4841 struct tstorm_eth_tpa_exist tpa = {0};
4842
4843 tpa.tpa_exist = 1;
4844
4845 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4846 ((u32 *)&tpa)[0]);
4847 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4848 ((u32 *)&tpa)[1]);
4849 }
4850
4851 /* Zero this manually as its initialization is
4852 currently missing in the initTool */
4853 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4854 REG_WR(bp, BAR_USTRORM_INTMEM +
4855 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4856}
4857
4858static void bnx2x_init_internal_port(struct bnx2x *bp)
4859{
4860 int port = BP_PORT(bp);
4861
4862 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4863 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4864 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4865 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4866}
4867
4868 /* Calculates the sum of vn_min_rates.
4869 It's needed for further normalizing of the min_rates.
4870 Returns:
4871 sum of vn_min_rates.
4872 or
4873 0 - if all the min_rates are 0.
4874 In the latter case the fairness algorithm should be deactivated.
4875 If not all min_rates are zero then those that are zeroes will be set to 1.
4876 */
4877static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4878{
4879 int all_zero = 1;
4880 int port = BP_PORT(bp);
4881 int vn;
4882
4883 bp->vn_weight_sum = 0;
4884 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4885 int func = 2*vn + port;
4886 u32 vn_cfg =
4887 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4888 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4889 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4890
4891 /* Skip hidden vns */
4892 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4893 continue;
4894
4895 /* If min rate is zero - set it to 1 */
4896 if (!vn_min_rate)
4897 vn_min_rate = DEF_MIN_RATE;
4898 else
4899 all_zero = 0;
4900
4901 bp->vn_weight_sum += vn_min_rate;
4902 }
4903
4904 /* ... only if all min rates are zeros - disable fairness */
4905 if (all_zero)
4906 bp->vn_weight_sum = 0;
4907}
4908
471de716 4909static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4910{
4911 struct tstorm_eth_function_common_config tstorm_config = {0};
4912 struct stats_indication_flags stats_flags = {0};
4913 int port = BP_PORT(bp);
4914 int func = BP_FUNC(bp);
4915 int i, j;
4916 u32 offset;
471de716 4917 u16 max_agg_size;
4918
4919 if (is_multi(bp)) {
555f6c78 4920 tstorm_config.config_flags = MULTI_FLAGS(bp);
4921 tstorm_config.rss_result_mask = MULTI_MASK;
4922 }
4923 if (IS_E1HMF(bp))
4924 tstorm_config.config_flags |=
4925 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4926
4927 tstorm_config.leading_client_id = BP_L_ID(bp);
4928
a2fbb9ea 4929 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4930 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4931 (*(u32 *)&tstorm_config));
4932
c14423fe 4933 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4934 bnx2x_set_storm_rx_mode(bp);
4935
4936 for_each_queue(bp, i) {
4937 u8 cl_id = bp->fp[i].cl_id;
4938
4939 /* reset xstorm per client statistics */
4940 offset = BAR_XSTRORM_INTMEM +
4941 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4942 for (j = 0;
4943 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4944 REG_WR(bp, offset + j*4, 0);
4945
4946 /* reset tstorm per client statistics */
4947 offset = BAR_TSTRORM_INTMEM +
4948 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4949 for (j = 0;
4950 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4951 REG_WR(bp, offset + j*4, 0);
4952
4953 /* reset ustorm per client statistics */
4954 offset = BAR_USTRORM_INTMEM +
4955 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4956 for (j = 0;
4957 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4958 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4959 }
4960
4961 /* Init statistics related context */
34f80b04 4962 stats_flags.collect_eth = 1;
a2fbb9ea 4963
66e855f3 4964 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4965 ((u32 *)&stats_flags)[0]);
66e855f3 4966 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4967 ((u32 *)&stats_flags)[1]);
4968
66e855f3 4969 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4970 ((u32 *)&stats_flags)[0]);
66e855f3 4971 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4972 ((u32 *)&stats_flags)[1]);
4973
de832a55
EG
4974 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4975 ((u32 *)&stats_flags)[0]);
4976 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4977 ((u32 *)&stats_flags)[1]);
4978
66e855f3 4979 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4980 ((u32 *)&stats_flags)[0]);
66e855f3 4981 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4982 ((u32 *)&stats_flags)[1]);
4983
66e855f3
YG
4984 REG_WR(bp, BAR_XSTRORM_INTMEM +
4985 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4986 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4987 REG_WR(bp, BAR_XSTRORM_INTMEM +
4988 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4989 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4990
4991 REG_WR(bp, BAR_TSTRORM_INTMEM +
4992 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4993 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4994 REG_WR(bp, BAR_TSTRORM_INTMEM +
4995 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4996 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4997
de832a55
EG
4998 REG_WR(bp, BAR_USTRORM_INTMEM +
4999 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5000 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5001 REG_WR(bp, BAR_USTRORM_INTMEM +
5002 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5003 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5004
34f80b04
EG
5005 if (CHIP_IS_E1H(bp)) {
5006 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5007 IS_E1HMF(bp));
5008 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5009 IS_E1HMF(bp));
5010 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5011 IS_E1HMF(bp));
5012 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5013 IS_E1HMF(bp));
5014
7a9b2557
VZ
5015 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5016 bp->e1hov);
34f80b04
EG
5017 }
5018
4f40f2cb
EG
5019 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5020 max_agg_size =
5021 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5022 SGE_PAGE_SIZE * PAGES_PER_SGE),
5023 (u32)0xffff);
555f6c78 5024 for_each_rx_queue(bp, i) {
7a9b2557 5025 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5026
5027 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5028 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5029 U64_LO(fp->rx_comp_mapping));
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5031 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5032 U64_HI(fp->rx_comp_mapping));
5033
7a9b2557 5034 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5035 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5036 max_agg_size);
5037 }
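
	/*
	 * Editorial sketch (not in the original source): worked numbers for
	 * the max_agg_size expression above, assuming for illustration that
	 * SGE_PAGE_SIZE * PAGES_PER_SGE = 8192 and MAX_SKB_FRAGS >= 8:
	 *
	 *	min(8, MAX_SKB_FRAGS) * 8192 = 8 * 8192 = 65536
	 *	min(65536, 0xffff)           = 0xffff
	 *
	 * i.e. the FW's 8-fragment limit is applied first and the result is
	 * then clamped to the 16-bit register width.
	 */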

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
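
/*
 * Editorial note (not in the original file): the switch above relies on
 * deliberate fall-through.  The MCP load_code says how much of the device
 * this driver instance owns, and each level implies the ones below it:
 *
 *	LOAD_COMMON   -> common + port + function init
 *	LOAD_PORT     -> port + function init
 *	LOAD_FUNCTION -> function init only
 */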

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME			0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
		       " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
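
/*
 * Editorial note (not in the original source): the 10 bytes skipped above
 * are the fixed gzip member header (RFC 1952):
 *
 *	bytes 0-1  magic 0x1f 0x8b
 *	byte  2    compression method (8 = deflate, Z_DEFLATED)
 *	byte  3    flags (FNAME = 0x08: a NUL-terminated file name follows)
 *	bytes 4-9  mtime, XFL, OS
 *
 * Passing -MAX_WBITS to zlib_inflateInit2() selects raw deflate, which is
 * why the header (and any FNAME string) must be stepped over by hand.
 */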

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable
 * from the driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
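
/*
 * Editorial note (not in the original source): the expected NIG byte
 * counts above follow directly from the debug packet size.  Each
 * bnx2x_lb_pckt() frame is 0x10 bytes, so one packet reads back 0x10,
 * and since the NIG statistic is not reset between the two parts (see
 * the TODO above), part 2 sees the 10 new packets plus the 1 from
 * part 1: 11 * 0x10 = 0xb0.
 */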

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}


static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		bp->port.need_hw_lock = 1;
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			 (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */

	bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
			 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
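
	/*
	 * Editorial sketch (not in the original source): the threshold
	 * formula above, in BRB units of 256 bytes, worked for mtu = 9000
	 * on a two-port board in single-function mode:
	 *
	 *	low  = 96 + 9000/64 + 1 = 96 + 140 + 1 = 237
	 *	high = 237 + 56 = 293		(i.e. low + 14KB/256)
	 */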
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and has a 1=valid bit added
   at the 53rd bit; then, since this is a wide register (TM), we split it
   into two 32 bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
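
/*
 * Editorial sketch (not in the original source): encoding a hypothetical
 * page-aligned DMA address x = 0x0001_2345_6789_A000 with the macros above:
 *
 *	x >> 12       = 0x12_3456_789A
 *	ONCHIP_ADDR1  = low 32 bits           = 0x3456789A
 *	ONCHIP_ADDR2  = (1 << 20) | (x >> 44) = 0x00100012  (valid bit set)
 *
 * and for the ILT range helpers: PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405,
 * PXP_ILT_RANGE(3, 7) = (7 << 10) | 3 = 0x1c03 (first/last line packed).
 */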

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
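
/*
 * Editorial note (not in the original source): the driver/MCP mailbox
 * handshake above keys on sequence numbers.  The low bits of the request
 * word carry seq; the firmware echoes the same seq in fw_mb_header when
 * it has finished, so a stale reply from an earlier command (different
 * seq) is simply polled past.  For example, with seq = 0x05:
 *
 *	write drv_mb_header = command | 0x05
 *	poll  fw_mb_header  until (value & FW_MSG_SEQ_NUMBER_MASK) == 0x05
 *	return value & FW_MSG_CODE_MASK		(the reply code)
 */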

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
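
/*
 * Editorial sketch (not in the original source): the swab16() packing
 * above, assuming a little-endian host and dev_addr 00:11:22:33:44:55:
 *
 *	*(u16 *)&dev_addr[0] = 0x1100 -> swab16 -> msb_mac_addr    = 0x0011
 *	*(u16 *)&dev_addr[2] = 0x3322 -> swab16 -> middle_mac_addr = 0x2233
 *	*(u16 *)&dev_addr[4] = 0x5544 -> swab16 -> lsb_mac_addr    = 0x4455
 *
 * i.e. the CAM entry holds the address big-endian, two bytes per field.
 */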

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6702
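/* Poll-with-timeout helper: *state_p is updated asynchronously by
 * bnx2x_sp_event(), so with cnt initialized to 5000 and msleep(1) per
 * iteration the wait below is bounded at roughly five seconds.
 */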
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

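/* Queue-count selection, e.g. (assuming BNX2X_MAX_QUEUES(bp) >= 8): with
 * multi_mode == ETH_RSS_MODE_REGULAR on an 8-CPU host, MSI-X mode requests
 * 8 rx/tx queue pairs; INT#x or MSI (or an MSI-X enable failure) falls
 * back to a single queue pair.
 */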
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP.
	   The reply tells us which type of LOAD to perform: if this is the
	   first port to be initialized, the common blocks must be
	   initialized as well; otherwise they are already up.
	*/
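	/* Worked example (hypothetical, driver-managed counters used when the
	 * MCP is absent): on a two-port device, the first load sees
	 * load_count = {0, 0, 0} and gets LOAD_COMMON; the first load on the
	 * other port gets LOAD_PORT; any further load gets LOAD_FUNCTION.
	 */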
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queues need only be re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

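/* Teardown mirrors bnx2x_nic_load(): rx filtering is closed first, tx
 * queues are drained, HALT/CFC_DEL/PORT_DEL ramrods tear down the
 * connections, and the chip is reset to a scope (common/port/function)
 * chosen the same way the load code was chosen above.
 */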
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_mb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_mb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;
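
		/* Worked example (hypothetical address): for E1HVN 0,
		 * entry = 8, i.e. MAC_MATCH entry 1; 00:10:18:ab:cd:ef is
		 * written as 0x0010 and then 0x18abcdef below.
		 */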
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

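/* On E1H the PXP2 "pretend" register below temporarily makes this
 * function's GRC accesses appear to come from function 0 (e.g. a write
 * of 0 to PXP2_REG_PGL_PRETEND_FUNC_F2 while running as function 2), so
 * interrupts can be disabled in the "like-E1" view of the chip.
 */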
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
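	/* Worked example (hypothetical register values): chip num 0x164e,
	 * rev 0x1, metal 0x00 and bond_id 0x0 assemble above to
	 * chip_id = 0x164e1000.
	 */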
7556
1c06328c
EG
7557 val = (REG_RD(bp, 0x2874) & 0x55);
7558 if ((bp->common.chip_id & 0x1) ||
7559 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7560 bp->flags |= ONE_PORT_FLAG;
7561 BNX2X_DEV_INFO("single port device\n");
7562 }
7563
34f80b04
EG
7564 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7565 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7566 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7567 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7568 bp->common.flash_size, bp->common.flash_size);
7569
7570 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7571 bp->link_params.shmem_base = bp->common.shmem_base;
7572 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7573
7574 if (!bp->common.shmem_base ||
7575 (bp->common.shmem_base < 0xA0000) ||
7576 (bp->common.shmem_base >= 0xC0000)) {
7577 BNX2X_DEV_INFO("MCP not active\n");
7578 bp->flags |= NO_MCP_FLAG;
7579 return;
7580 }
7581
7582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7583 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7584 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7585 BNX2X_ERR("BAD MCP validity signature\n");
7586
7587 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7588 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7589
7590 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7591 SHARED_HW_CFG_LED_MODE_MASK) >>
7592 SHARED_HW_CFG_LED_MODE_SHIFT);
7593
c2c8b03e
EG
7594 bp->link_params.feature_config_flags = 0;
7595 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7596 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7597 bp->link_params.feature_config_flags |=
7598 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7599 else
7600 bp->link_params.feature_config_flags &=
7601 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7602
34f80b04
EG
7603 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7604 bp->common.bc_ver = val;
7605 BNX2X_DEV_INFO("bc_ver %X\n", val);
7606 if (val < BNX2X_BC_VER) {
7607 /* for now only warn
7608 * later we might need to enforce this */
7609 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7610 " please upgrade BC\n", BNX2X_BC_VER, val);
7611 }
72ce58c3
EG
7612
7613 if (BP_E1HVN(bp) == 0) {
7614 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7615 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7616 } else {
7617 /* no WOL capability for E1HVN != 0 */
7618 bp->flags |= NO_WOL_FLAG;
7619 }
7620 BNX2X_DEV_INFO("%sWoL capable\n",
7621 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7622
7623 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7624 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7625 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7626 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7627
7628 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7629 val, val2, val3, val4);
7630}
7631
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

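/* Example of the mapping below (values taken from the cases themselves):
 * a link_config of PORT_FEATURE_LINK_SPEED_1G, with 1G supported, yields
 * req_line_speed = SPEED_1000, req_duplex = DUPLEX_FULL and
 * advertising = ADVERTISED_1000baseT_Full | ADVERTISED_TP.
 */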
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

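	/* Worked example (hypothetical NVRAM contents): mac_upper = 0x0010
	 * and mac_lower = 0x18abcdef unpack below to 00:10:18:ab:cd:ef.
	 */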
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}
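
	/* In the E1HMF case above, a hypothetical MAX_BW field of 25 would
	 * cap the reported speed at 25 * 100 = 2500 Mbps.
	 */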

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

8314static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8315{
8316 struct bnx2x *bp = netdev_priv(dev);
8317 u32 advertising;
8318
34f80b04
EG
8319 if (IS_E1HMF(bp))
8320 return 0;
8321
a2fbb9ea
ET
8322 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8323 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8324 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8325 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8326 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8327 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8328 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8329
a2fbb9ea 8330 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8331 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8332 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8333 return -EINVAL;
f1410647 8334 }
a2fbb9ea
ET
8335
8336 /* advertise the requested speed and duplex if supported */
34f80b04 8337 cmd->advertising &= bp->port.supported;
a2fbb9ea 8338
c18487ee
YR
8339 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8340 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8341 bp->port.advertising |= (ADVERTISED_Autoneg |
8342 cmd->advertising);
a2fbb9ea
ET
8343
8344 } else { /* forced speed */
8345 /* advertise the requested speed and duplex if supported */
8346 switch (cmd->speed) {
8347 case SPEED_10:
8348 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8349 if (!(bp->port.supported &
f1410647
ET
8350 SUPPORTED_10baseT_Full)) {
8351 DP(NETIF_MSG_LINK,
8352 "10M full not supported\n");
a2fbb9ea 8353 return -EINVAL;
f1410647 8354 }
a2fbb9ea
ET
8355
8356 advertising = (ADVERTISED_10baseT_Full |
8357 ADVERTISED_TP);
8358 } else {
34f80b04 8359 if (!(bp->port.supported &
f1410647
ET
8360 SUPPORTED_10baseT_Half)) {
8361 DP(NETIF_MSG_LINK,
8362 "10M half not supported\n");
a2fbb9ea 8363 return -EINVAL;
f1410647 8364 }
a2fbb9ea
ET
8365
8366 advertising = (ADVERTISED_10baseT_Half |
8367 ADVERTISED_TP);
8368 }
8369 break;
8370
8371 case SPEED_100:
8372 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8373 if (!(bp->port.supported &
f1410647
ET
8374 SUPPORTED_100baseT_Full)) {
8375 DP(NETIF_MSG_LINK,
8376 "100M full not supported\n");
a2fbb9ea 8377 return -EINVAL;
f1410647 8378 }
a2fbb9ea
ET
8379
8380 advertising = (ADVERTISED_100baseT_Full |
8381 ADVERTISED_TP);
8382 } else {
34f80b04 8383 if (!(bp->port.supported &
f1410647
ET
8384 SUPPORTED_100baseT_Half)) {
8385 DP(NETIF_MSG_LINK,
8386 "100M half not supported\n");
a2fbb9ea 8387 return -EINVAL;
f1410647 8388 }
a2fbb9ea
ET
8389
8390 advertising = (ADVERTISED_100baseT_Half |
8391 ADVERTISED_TP);
8392 }
8393 break;
8394
8395 case SPEED_1000:
f1410647
ET
8396 if (cmd->duplex != DUPLEX_FULL) {
8397 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8398 return -EINVAL;
f1410647 8399 }
a2fbb9ea 8400
34f80b04 8401 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8402 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8403 return -EINVAL;
f1410647 8404 }
a2fbb9ea
ET
8405
8406 advertising = (ADVERTISED_1000baseT_Full |
8407 ADVERTISED_TP);
8408 break;
8409
8410 case SPEED_2500:
f1410647
ET
8411 if (cmd->duplex != DUPLEX_FULL) {
8412 DP(NETIF_MSG_LINK,
8413 "2.5G half not supported\n");
a2fbb9ea 8414 return -EINVAL;
f1410647 8415 }
a2fbb9ea 8416
34f80b04 8417 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8418 DP(NETIF_MSG_LINK,
8419 "2.5G full not supported\n");
a2fbb9ea 8420 return -EINVAL;
f1410647 8421 }
a2fbb9ea 8422
f1410647 8423 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8424 ADVERTISED_TP);
8425 break;
8426
8427 case SPEED_10000:
f1410647
ET
8428 if (cmd->duplex != DUPLEX_FULL) {
8429 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8430 return -EINVAL;
f1410647 8431 }
a2fbb9ea 8432
34f80b04 8433 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8434 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8435 return -EINVAL;
f1410647 8436 }
a2fbb9ea
ET
8437
8438 advertising = (ADVERTISED_10000baseT_Full |
8439 ADVERTISED_FIBRE);
8440 break;
8441
8442 default:
f1410647 8443 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8444 return -EINVAL;
8445 }
8446
c18487ee
YR
8447 bp->link_params.req_line_speed = cmd->speed;
8448 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8449 bp->port.advertising = advertising;
a2fbb9ea
ET
8450 }
8451
c18487ee 8452 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8453 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8454 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8455 bp->port.advertising);
a2fbb9ea 8456
34f80b04 8457 if (netif_running(dev)) {
bb2a0f7a 8458 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8459 bnx2x_link_set(bp);
8460 }
a2fbb9ea
ET
8461
8462 return 0;
8463}
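
/*
 * Illustrative sketch (not part of the driver): the forced-speed branch
 * above repeats one pattern per speed - test a SUPPORTED_* capability bit
 * before accepting the request.  A minimal standalone demo of that gating
 * logic, with made-up capability values (all names below are hypothetical;
 * compile separately):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SUP_10_FULL	(1u << 0)	/* stand-ins for SUPPORTED_* bits */
#define SUP_100_FULL	(1u << 1)
#define SUP_1000_FULL	(1u << 2)

/* return the advertising mask for a forced speed, or 0 if unsupported */
static uint32_t forced_speed_adv(uint32_t supported, int speed)
{
	switch (speed) {
	case 10:   return (supported & SUP_10_FULL)   ? SUP_10_FULL   : 0;
	case 100:  return (supported & SUP_100_FULL)  ? SUP_100_FULL  : 0;
	case 1000: return (supported & SUP_1000_FULL) ? SUP_1000_FULL : 0;
	default:   return 0;
	}
}

int main(void)
{
	uint32_t supported = SUP_100_FULL | SUP_1000_FULL;

	/* 10M is rejected (mask 0, i.e. -EINVAL above); 1G is accepted */
	printf("10M -> 0x%x\n", forced_speed_adv(supported, 10));
	printf("1G  -> 0x%x\n", forced_speed_adv(supported, 1000));
	return 0;
}
#endif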

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
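
/*
 * Illustrative sketch (not part of the driver): both NVRAM lock helpers
 * follow the same request-then-poll pattern - write a request bit, then
 * spin reading the arbitration register until the grant bit appears or
 * the poll budget runs out.  A standalone model of that loop against a
 * fake register (all names and bit values hypothetical; compile
 * separately):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_arb_reg;	/* stands in for the arbitration register */

/* pretend the hardware grants the request on the third poll */
static uint32_t fake_reg_rd(int polls)
{
	return (polls >= 3) ? (fake_arb_reg | 0x100) : fake_arb_reg;
}

static int poll_for_grant(uint32_t grant_bit, int budget)
{
	int i;

	for (i = 0; i < budget; i++)
		if (fake_reg_rd(i) & grant_bit)
			return 0;	/* granted */
	return -1;			/* would be -EBUSY in the driver */
}

int main(void)
{
	printf("grant: %d\n", poll_for_grant(0x100, 10));	/* 0 */
	printf("grant: %d\n", poll_for_grant(0x200, 10));	/* -1 */
	return 0;
}
#endif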

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
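
/*
 * Illustrative sketch (not part of the driver): bnx2x_nvram_read() tags
 * the first dword of a transfer with a FIRST flag, the final dword with a
 * LAST flag, and everything in between with neither (a single-dword read
 * gets both).  A standalone trace of that sequencing (flag values are made
 * up; compile separately):
 */
#if 0
#include <stdio.h>

#define CMD_FIRST 0x1
#define CMD_LAST  0x2

int main(void)
{
	int buf_size = 16;		/* four dwords */
	unsigned cmd_flags = CMD_FIRST;

	while (buf_size > 4) {
		printf("dword flags 0x%x\n", cmd_flags);	/* 0x1, 0, 0 */
		buf_size -= 4;
		cmd_flags = 0;
	}
	cmd_flags |= CMD_LAST;
	printf("dword flags 0x%x\n", cmd_flags);		/* 0x2 */
	return 0;
}
#endif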

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
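
/*
 * Illustrative sketch (not part of the driver): bnx2x_nvram_write1()
 * updates a single byte with a read-modify-write of the aligned dword,
 * using BYTE_OFFSET() to pick the byte lane.  The masking itself can be
 * exercised standalone (compile separately):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define BYTE_OFFSET(offset)	(8 * ((offset) & 0x03))

int main(void)
{
	uint32_t dword = 0x44332211;	/* dword read back from flash */
	uint32_t offset = 0x102;	/* byte 2 within the aligned dword */
	uint8_t byte = 0xab;		/* new byte value */

	dword &= ~(0xffu << BYTE_OFFSET(offset));	/* clear the lane */
	dword |= ((uint32_t)byte << BYTE_OFFSET(offset));/* merge new byte */

	printf("0x%08x\n", dword);	/* 0x44ab2211 */
	return 0;
}
#endif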

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
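
/*
 * Illustrative sketch (not part of the driver): bnx2x_set_pauseparam()
 * builds req_flow_ctrl by starting from AUTO, OR-ing in RX/TX bits, and
 * demoting a request that stayed at plain AUTO to NONE unless autoneg is
 * in effect.  A simplified standalone model of that decision - the flag
 * values below are made up and the autoneg condition is collapsed to a
 * single boolean (compile separately):
 */
#if 0
#include <stdio.h>

#define FC_AUTO	0x4	/* hypothetical stand-ins, not the driver's values */
#define FC_RX	0x2
#define FC_TX	0x1
#define FC_NONE	0x0

static unsigned req_flow_ctrl(int rx_pause, int tx_pause, int autoneg)
{
	unsigned req = FC_AUTO;

	if (rx_pause)
		req |= FC_RX;
	if (tx_pause)
		req |= FC_TX;
	if (req == FC_AUTO && !autoneg)
		req = FC_NONE;	/* no pause requested, autoneg off */
	return req;
}

int main(void)
{
	printf("rx+tx: 0x%x\n", req_flow_ctrl(1, 1, 0));	/* 0x7 */
	printf("none:  0x%x\n", req_flow_ctrl(0, 0, 0));	/* 0x0 */
	printf("auto:  0x%x\n", req_flow_ctrl(0, 0, 1));	/* 0x4 */
	return 0;
}
#endif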

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
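
/*
 * Illustrative sketch (not part of the driver): the register test above
 * performs a save / write / read-back / restore cycle and compares only
 * the bits covered by each register's mask.  The same cycle against a
 * plain array standing in for the register file (compile separately):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t regs[4];	/* fake register file */

static int test_one(int idx, uint32_t mask, uint32_t wr_val)
{
	uint32_t save_val = regs[idx];		/* save */
	uint32_t val;

	regs[idx] = wr_val & mask;	/* write; mask models the r/w bits */
	val = regs[idx];		/* read back */
	regs[idx] = save_val;		/* restore */

	return (val & mask) == (wr_val & mask) ? 0 : -1;
}

int main(void)
{
	/* run the walk twice, with all-zeros and all-ones patterns */
	uint32_t patterns[2] = { 0x00000000, 0xffffffff };
	int idx, ok = 1;

	for (idx = 0; idx < 2; idx++)
		if (test_one(1, 0x000003ff, patterns[idx]))
			ok = 0;
	printf("register test %s\n", ok ? "passed" : "FAILED");
	return 0;
}
#endif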

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
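
/*
 * Illustrative sketch (not part of the driver): each NVRAM section above
 * stores its own CRC32 inside the image, so re-running the CRC over
 * data-plus-CRC must land on the fixed residual 0xdebb20e3.  A standalone
 * demo of that property, assuming ether_crc_le() semantics (reflected
 * polynomial 0xedb88320, init 0xffffffff, no final inversion); compile
 * separately:
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc_le(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;	/* raw register value, not inverted */
}

int main(void)
{
	uint8_t image[16 + 4] = "0123456789abcdef";
	uint32_t crc = ~crc_le(image, 16);	/* stored CRC is inverted */

	memcpy(&image[16], &crc, 4);	/* append; little-endian host (*) */
	printf("residual 0x%08x\n", crc_le(image, sizeof(image)));
	/* prints 0xdebb20e3; (*) a big-endian host would need an explicit
	 * little-endian store instead of memcpy() */
	return 0;
}
#endif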

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
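
/*
 * Illustrative sketch (not part of the driver): 8-byte counters live in
 * the stats block as a hi:lo pair of 32-bit words, and HILO_U64() glues
 * them back together.  Assuming the obvious definition, the combination
 * is a shift plus the low word (compile separately):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define HILO_U64(hi, lo)	((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	uint32_t stats[2] = { 0x00000001, 0x23456789 };	/* hi, lo words */

	printf("%llx\n",
	       (unsigned long long)HILO_U64(stats[0], stats[1]));
	/* prints 123456789 */
	return 0;
}
#endif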

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
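
/*
 * Illustrative sketch (not part of the driver): bnx2x_tx_split() turns one
 * buffer descriptor covering (addr, len) into a header BD of hlen bytes
 * and a data BD starting at addr + hlen, reusing the single DMA mapping.
 * The address/length arithmetic in isolation (compile separately):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct bd { uint64_t addr; uint16_t nbytes; };

static void split_bd(struct bd *h, struct bd *d, uint16_t hlen)
{
	uint16_t old_len = h->nbytes;

	h->nbytes = hlen;		/* first BD now covers headers only */
	d->addr = h->addr + hlen;	/* data BD continues the same mapping */
	d->nbytes = old_len - hlen;
}

int main(void)
{
	struct bd h = { 0x1000, 1514 }, d;

	split_bd(&h, &d, 54);		/* e.g. eth + ip + tcp headers */
	printf("hdr  %llx/%u\n", (unsigned long long)h.addr,
	       (unsigned)h.nbytes);
	printf("data %llx/%u\n", (unsigned long long)d.addr,
	       (unsigned)d.nbytes);
	return 0;
}
#endif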
10135
10136static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10137{
10138 if (fix > 0)
10139 csum = (u16) ~csum_fold(csum_sub(csum,
10140 csum_partial(t_header - fix, fix, 0)));
10141
10142 else if (fix < 0)
10143 csum = (u16) ~csum_fold(csum_add(csum,
10144 csum_partial(t_header, -fix, 0)));
10145
10146 return swab16(csum);
10147}
10148
10149static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10150{
10151 u32 rc;
10152
10153 if (skb->ip_summed != CHECKSUM_PARTIAL)
10154 rc = XMIT_PLAIN;
10155
10156 else {
4781bfad 10157 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10158 rc = XMIT_CSUM_V6;
10159 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10160 rc |= XMIT_CSUM_TCP;
10161
10162 } else {
10163 rc = XMIT_CSUM_V4;
10164 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10165 rc |= XMIT_CSUM_TCP;
10166 }
10167 }
10168
10169 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10170 rc |= XMIT_GSO_V4;
10171
10172 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10173 rc |= XMIT_GSO_V6;
10174
10175 return rc;
10176}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet this fragmented must always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
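
/* Worked example for the window check above, assuming MAX_FETCH_BD is 13
 * (the HW BD-fetch limit): wnd_size is then 10, so with an MSS of 1460
 * every run of 10 consecutive data BDs must cover at least 1460 bytes.
 * A packet built from many tiny frags fails the check and is linearized,
 * since the FW could otherwise fetch MAX_FETCH_BD BDs without completing
 * a single MSS.
 */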

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
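
/* Resulting BD chain for a hypothetical 2-frag TSO packet with checksum
 * offload:
 *
 *   start BD (headers) -> parse BD -> split data BD -> frag BD
 *     -> frag BD (marked ETH_TX_BD_FLAGS_END_BD)
 *
 * giving nbd = nr_frags + 2 (start BD + parse BD) + 1 for the split BD,
 * plus one more if the chain wraps past a BD page boundary (the
 * TX_BD_POFF() check above).
 */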

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
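
/* The E1H branch above is an approximate filter: the top byte of
 * crc32c(MAC) selects one of 256 bits spread across the MC_HASH_SIZE
 * 32-bit registers.  E.g. a CRC of 0x9Axxxxxx gives bit = 0x9A, hence
 * regidx = 4 and bit 26 of MC_HASH register 4 is set.  False positives
 * are possible; exact per-address filtering is only done on E1 via the
 * CAM above.
 */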

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
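
/* Memory map established above, for reference: BAR0 is the register
 * window (bp->regview), BAR2 is the doorbell space (bp->doorbells,
 * capped at BNX2X_DB_SIZE).  Probe fails before touching any register
 * if either BAR is missing.
 */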

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
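
/* Both helpers only feed the probe banner in bnx2x_init_one(); e.g. a
 * link trained to x8 at Gen2 reports width 8 and speed 2, printed as
 * "PCI-E x8 5GHz (Gen2)".
 */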

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};
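
/* The PCI error-recovery core invokes these callbacks in order after a
 * fatal bus error:
 *   1. error_detected - detach the netdev, unload, request a slot reset
 *   2. slot_reset     - re-enable the device and restore config space
 *   3. resume         - recover MCP state and reload the NIC
 * netif_device_attach() at the end of bnx2x_io_resume() lets traffic
 * flow again.
 */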

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);