/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

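/* A note on the two helpers above (a hedged summary, not authoritative):
 * GRC registers are reached through a window in PCI config space. The
 * target address is written to PCICFG_GRC_ADDRESS, the data register
 * PCICFG_GRC_DATA is then written or read, and the window is finally
 * pointed back at PCICFG_VENDOR_ID_OFFSET, presumably so that a stray
 * config-space access cannot land on an arbitrary GRC address. As the
 * comment above notes, this path is init-only and relies on the MCP for
 * locking.
 */
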
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

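/* The write path above follows the driver's standard DMAE handshake:
 * build a dmae_command whose completion address points at the slowpath
 * wb_comp word, zero that word, post the command with bnx2x_post_dmae(),
 * then poll wb_comp until the chip writes DMAE_COMP_VAL into it (about
 * 200 polls of 5us each, stretched to 100ms steps on emulation/FPGA).
 * bnx2x_read_dmae() below is the mirror image, with a GRC source and a
 * PCI destination.
 */
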
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

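/* Each of the four storm processors (X, T, C and U) keeps an assert list
 * in its internal memory: one 16-byte entry per assert, read above as four
 * 32-bit rows. The scan walks at most STROM_ASSERT_ARRAY_SIZE entries and
 * stops at the first entry whose row0 still holds
 * COMMON_ASM_INVALID_ASSERT_OPCODE, i.e. at the first unused slot.
 * (STROM_ASSERT_ARRAY_SIZE is the spelling used by the firmware headers.)
 */
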
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

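/* The firmware log appears to live in a fixed window of the MCP
 * scratchpad and to be written circularly, with the word at offset 0xf104
 * ("mark") recording the wrap point. bnx2x_fw_dump() therefore prints
 * from mark to the end of the window first, then from the start of the
 * window (0xF108) back up to mark, byte-swapping each word with htonl()
 * so the logged ASCII comes out in readable order.
 */
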
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

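/* Quiesce order used above: bump intr_sem first so any ISR that still
 * runs bails out early, optionally mask the HC so no new interrupts are
 * generated, synchronize_irq() every vector so in-flight handlers finish,
 * and finally cancel and flush the slowpath work. Only after this
 * sequence can the caller assume no driver code is touching the fast
 * path.
 */
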
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

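/* Worked example for the arithmetic above (the numbers are illustrative
 * only): with a hypothetical tx_ring_size of 4096 and NUM_TX_RINGS of 16,
 * an empty ring (prod == cons) gives used = 0 + 16, so 4080 BDs are
 * reported free. The NUM_TX_RINGS bias permanently reserves the
 * "next page" BD at the end of each ring page; those BDs are consumed by
 * the chip but never carry packet data.
 */
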
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

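/* Slowpath commands ("ramrods") are posted to the firmware on the SPQ and
 * are acknowledged with a completion CQE on the fast-path RX ring, which
 * is why bnx2x_sp_event() is driven from the RX path. The
 * (command | state) switch encodes the expected transition: a given
 * completion only makes sense in the state that issued it, and anything
 * else is flagged as an unexpected MC reply.
 */
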
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in each page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

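/* The SGE mask is a bitmap over the SGE ring, one bit per entry, grouped
 * into 64-bit elements. Bits start out set and are cleared as the
 * firmware reports entries consumed; once an element reaches zero,
 * bnx2x_update_sge_prod() steps the producer over it, re-arming the
 * element to RX_SGE_MASK_ELEM_ONE_MASK and advancing rx_sge_prod by
 * RX_SGE_MASK_ELEM_SZ entries. The bits of each page's "next page" entry
 * are pre-cleared by bnx2x_clear_sge_mask_next_elems(), since the
 * firmware never reports those and they would otherwise keep an element
 * from ever reaching zero.
 */
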
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

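/* TPA bin lifecycle, as implemented above: bnx2x_tpa_start() parks the
 * partially filled skb in tpa_pool[queue] and hands the chip a fresh
 * buffer; bnx2x_tpa_stop() unmaps the parked skb, rebuilds the IP
 * checksum (stale after aggregation), attaches the SGE pages as frags via
 * bnx2x_fill_frag_skb() and passes the result up the stack. The new skb
 * is only needed to refill the bin, so if its allocation fails the
 * aggregated packet is dropped but the bin keeps its old buffer.
 */
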
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

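/* Three kinds of CQEs flow through the loop above: slowpath completions
 * (dispatched to bnx2x_sp_event()), TPA start/end markers (handled by the
 * tpa helpers, with a CQE flagged both START and END treated as a plain
 * packet), and regular packets. Small packets on a jumbo MTU are copied
 * into a freshly allocated skb so the large receive buffer can be reused
 * in place; everything else either maps a replacement buffer or, on
 * failure, recycles the old one and drops the frame.
 */
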
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
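
/*
 * The acquire above relies on a set-on-write register pair (as inferred
 * from the code, not from documentation): writing the resource bit to
 * hw_lock_control_reg + 4 asks the hardware to set that bit atomically,
 * and reading the control register back tells us whether we won the race -
 * the bit is only observed set by the function that now owns the resource.
 */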

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
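
/*
 * Illustrative caller pattern for the lock pair above (a sketch, using the
 * SPIO resource as an example - see bnx2x_set_spio() below for a real user):
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO) == 0) {
 *		... access the shared register block ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	}
 *
 * -EEXIST means this function already holds the resource bit, and -EAGAIN
 * means another function held it for the whole 5 second poll window.
 */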

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
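
/*
 * The PHY lock is two-level: the mutex serializes MDIO users within this
 * driver instance, and, when bp->port.need_hw_lock is set (presumably for
 * PHYs shared between ports), the MDIO HW lock extends that exclusion to
 * the other PCI functions as well.
 */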

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
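
/*
 * Each GPIO pin is controlled through per-pin FLOAT/SET/CLR bit groups in
 * MISC_REG_GPIO, so driving a pin means clearing its FLOAT bit and writing
 * either SET or CLR.  A real caller appears in the fan failure handling
 * further below, e.g.:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */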

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
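
/*
 * SPIO mirrors the GPIO flow above, but the pins are chip-global rather
 * than per port, so there is no port-swap correction; only SPIO_4..SPIO_7
 * are valid here, as the range check at the top of the function enforces.
 */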

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
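
/*
 * The mapping above follows the IEEE 802.3 pause advertisement encoding as
 * reflected in the ethtool bits: symmetric pause advertises
 * Pause | Asym_Pause, asymmetric pause advertises Asym_Pause alone, and
 * anything else advertises neither.
 */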

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000 usec, for 1G it is 10000 usec */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
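
/*
 * Worked example of the units above (assuming line_speed is in Mbps, as
 * the rest of the driver suggests): at 10000 Mbps, r_param = 10000/8 =
 * 1250 bytes per usec, and t_fair = T_FAIR_COEF / 10000 comes out to the
 * 1000 usec quoted in the comment; at 1000 Mbps the same formula gives
 * 10000 usec.
 */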

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
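
/*
 * Units in the function above, as far as can be inferred from the code:
 * the FUNC_MF_CFG_{MIN,MAX}_BW fields hold bandwidth in steps of 100 Mbps
 * (hence the "* 100" that yields vn_min_rate/vn_max_rate in Mbps), and the
 * quota follows as rate[Mbps] * timeout[usec] / 8 = bytes allowed per
 * rate-shaping period.
 */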


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
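
/*
 * A representative caller of bnx2x_sp_post() (taken from the statistics
 * code later in this file) - the 64-bit ramrod payload travels in the
 * data_hi/data_lo pair and 'common' selects a common ramrod:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */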

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
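
/*
 * The return value above is a bitmask of which default status block
 * indices advanced: bit 0 - attention bits, bit 1 - cstorm, bit 2 -
 * ustorm, bit 3 - xstorm, bit 4 - tstorm.  bnx2x_sp_task() below only
 * tests bit 0 explicitly (HW attentions) and acks the rest.
 */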

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
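
/*
 * Edge detection above in words: a bit is newly asserted when the HW
 * reports it set but it is neither acked nor in our soft state, and newly
 * deasserted when the HW reports it clear while it is both acked and in
 * our soft state.  The "BAD attention state" check flags bits where
 * attn_bits and attn_ack agree (no transition in flight) yet the soft
 * state still disagrees with the hardware.
 */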

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
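
/*
 * A quick sanity example of the split 64-bit arithmetic (hypothetical
 * values): with s = 0x00000001:0xFFFFFFFF and a = 0x00000000:0x00000001,
 * ADD_64 wraps s_lo to 0, detects the wrap via (s_lo < a_lo) and carries
 * into s_hi, giving 0x00000002:0x00000000.  DIFF_64 "loans" from the high
 * word the same way when the low subtraction underflows, and clamps the
 * result to 0:0 when the subtrahend exceeds the minuend.
 */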

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
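
/*
 * Note that bnx2x_hilo() returns a long: on 64-bit kernels it folds the
 * hi/lo pair into one value via HILO_U64, while on 32-bit kernels only the
 * low 32 bits survive - an accepted truncation for these counters.
 */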

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* the stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3252
3253static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3254{
3255 struct dmae_command *dmae;
34f80b04 3256 int port = BP_PORT(bp);
bb2a0f7a 3257 int vn = BP_E1HVN(bp);
a2fbb9ea 3258 u32 opcode;
bb2a0f7a 3259 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3260 u32 mac_addr;
bb2a0f7a
YG
3261 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3262
3263 /* sanity */
3264 if (!bp->link_vars.link_up || !bp->port.pmf) {
3265 BNX2X_ERR("BUG!\n");
3266 return;
3267 }
a2fbb9ea
ET
3268
3269 bp->executer_idx = 0;
bb2a0f7a
YG
3270
3271 /* MCP */
3272 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3273 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3274 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3275#ifdef __BIG_ENDIAN
bb2a0f7a 3276 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3277#else
bb2a0f7a 3278 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3279#endif
bb2a0f7a
YG
3280 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3281 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3282
bb2a0f7a 3283 if (bp->port.port_stx) {
a2fbb9ea
ET
3284
3285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286 dmae->opcode = opcode;
bb2a0f7a
YG
3287 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3288 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3289 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3290 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3291 dmae->len = sizeof(struct host_port_stats) >> 2;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3294 dmae->comp_val = 1;
a2fbb9ea
ET
3295 }
3296
bb2a0f7a
YG
3297 if (bp->func_stx) {
3298
3299 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3300 dmae->opcode = opcode;
3301 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3302 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3303 dmae->dst_addr_lo = bp->func_stx >> 2;
3304 dmae->dst_addr_hi = 0;
3305 dmae->len = sizeof(struct host_func_stats) >> 2;
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3308 dmae->comp_val = 1;
a2fbb9ea
ET
3309 }
3310
bb2a0f7a 3311 /* MAC */
a2fbb9ea
ET
3312 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3313 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3314 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3315#ifdef __BIG_ENDIAN
3316 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3317#else
3318 DMAE_CMD_ENDIANITY_DW_SWAP |
3319#endif
bb2a0f7a
YG
3320 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3321 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3322
c18487ee 3323 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3324
3325 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3326 NIG_REG_INGRESS_BMAC0_MEM);
3327
3328 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3329 BIGMAC_REGISTER_TX_STAT_GTBYT */
3330 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331 dmae->opcode = opcode;
3332 dmae->src_addr_lo = (mac_addr +
3333 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3334 dmae->src_addr_hi = 0;
3335 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3336 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3337 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3338 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3339 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3340 dmae->comp_addr_hi = 0;
3341 dmae->comp_val = 1;
3342
3343 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3344 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3345 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346 dmae->opcode = opcode;
3347 dmae->src_addr_lo = (mac_addr +
3348 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3349 dmae->src_addr_hi = 0;
3350 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3351 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3352 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3353 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3354 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3355 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3356 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3357 dmae->comp_addr_hi = 0;
3358 dmae->comp_val = 1;
3359
c18487ee 3360 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3361
3362 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3363
3364 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = opcode;
3367 dmae->src_addr_lo = (mac_addr +
3368 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3369 dmae->src_addr_hi = 0;
3370 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3372 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3373 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374 dmae->comp_addr_hi = 0;
3375 dmae->comp_val = 1;
3376
3377 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3378 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3379 dmae->opcode = opcode;
3380 dmae->src_addr_lo = (mac_addr +
3381 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3382 dmae->src_addr_hi = 0;
3383 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3384 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3385 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3386 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3387 dmae->len = 1;
3388 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3389 dmae->comp_addr_hi = 0;
3390 dmae->comp_val = 1;
3391
3392 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3394 dmae->opcode = opcode;
3395 dmae->src_addr_lo = (mac_addr +
3396 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3397 dmae->src_addr_hi = 0;
3398 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3399 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3400 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3401 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3402 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3404 dmae->comp_addr_hi = 0;
3405 dmae->comp_val = 1;
3406 }
3407
3408 /* NIG */
bb2a0f7a
YG
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = opcode;
3411 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3412 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3413 dmae->src_addr_hi = 0;
3414 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3415 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3416 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3417 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418 dmae->comp_addr_hi = 0;
3419 dmae->comp_val = 1;
3420
3421 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3422 dmae->opcode = opcode;
3423 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3424 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3425 dmae->src_addr_hi = 0;
3426 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3427 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3428 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3429 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3430 dmae->len = (2*sizeof(u32)) >> 2;
3431 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3432 dmae->comp_addr_hi = 0;
3433 dmae->comp_val = 1;
3434
3435 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3436 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3437 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3438 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3439 #ifdef __BIG_ENDIAN
3440 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3441 #else
3442 DMAE_CMD_ENDIANITY_DW_SWAP |
3443 #endif
3444 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3445 (vn << DMAE_CMD_E1HVN_SHIFT));
3446 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3447 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3448 dmae->src_addr_hi = 0;
3449 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3450 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3451 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3452 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3453 dmae->len = (2*sizeof(u32)) >> 2;
3454 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3455 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3456 dmae->comp_val = DMAE_COMP_VAL;
3457
3458 *stats_comp = 0;
3459}
3460
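/* A single DMAE command copies the function statistics block from host
 * memory (func_stats) to its shared-memory slot at bp->func_stx; the
 * DMAE engine signals completion by writing DMAE_COMP_VAL into the
 * stats_comp word, which the update path later checks.
 */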
3461 static void bnx2x_func_stats_init(struct bnx2x *bp)
3462 {
3463 struct dmae_command *dmae = &bp->stats_dmae;
3464 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3465
3466 /* sanity */
3467 if (!bp->func_stx) {
3468 BNX2X_ERR("BUG!\n");
3469 return;
3470 }
3471
3472 bp->executer_idx = 0;
3473 memset(dmae, 0, sizeof(struct dmae_command));
3474
3475 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3476 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3477 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3478 #ifdef __BIG_ENDIAN
3479 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3480 #else
3481 DMAE_CMD_ENDIANITY_DW_SWAP |
3482 #endif
3483 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3484 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3485 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3486 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3487 dmae->dst_addr_lo = bp->func_stx >> 2;
3488 dmae->dst_addr_hi = 0;
3489 dmae->len = sizeof(struct host_func_stats) >> 2;
3490 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3491 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3492 dmae->comp_val = DMAE_COMP_VAL;
3493
3494 *stats_comp = 0;
3495}
3496
3497static void bnx2x_stats_start(struct bnx2x *bp)
3498{
3499 if (bp->port.pmf)
3500 bnx2x_port_stats_init(bp);
3501
3502 else if (bp->func_stx)
3503 bnx2x_func_stats_init(bp);
3504
3505 bnx2x_hw_stats_post(bp);
3506 bnx2x_storm_stats_post(bp);
3507}
3508
3509static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3510{
3511 bnx2x_stats_comp(bp);
3512 bnx2x_stats_pmf_update(bp);
3513 bnx2x_stats_start(bp);
3514}
3515
3516static void bnx2x_stats_restart(struct bnx2x *bp)
3517{
3518 bnx2x_stats_comp(bp);
3519 bnx2x_stats_start(bp);
3520}
3521
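/* The UPDATE_STAT64 macros below fold each 64-bit BigMAC counter pair
 * read by DMAE into the corresponding mac_stx entry of the host port
 * statistics; the pause-frame totals are then mirrored from mac_stx[1]
 * into the driver-visible eth_stats.
 */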
3522static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3523{
3524 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3525 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3526 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3527 struct regpair diff;
3528
3529 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3530 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3531 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3532 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3533 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3534 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3535 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3536 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3537 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3538 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3539 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3540 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3541 UPDATE_STAT64(tx_stat_gt127,
3542 tx_stat_etherstatspkts65octetsto127octets);
3543 UPDATE_STAT64(tx_stat_gt255,
3544 tx_stat_etherstatspkts128octetsto255octets);
3545 UPDATE_STAT64(tx_stat_gt511,
3546 tx_stat_etherstatspkts256octetsto511octets);
3547 UPDATE_STAT64(tx_stat_gt1023,
3548 tx_stat_etherstatspkts512octetsto1023octets);
3549 UPDATE_STAT64(tx_stat_gt1518,
3550 tx_stat_etherstatspkts1024octetsto1522octets);
3551 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3552 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3553 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3554 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3555 UPDATE_STAT64(tx_stat_gterr,
3556 tx_stat_dot3statsinternalmactransmiterrors);
3557 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3558
3559 estats->pause_frames_received_hi =
3560 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3561 estats->pause_frames_received_lo =
3562 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3563
3564 estats->pause_frames_sent_hi =
3565 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3566 estats->pause_frames_sent_lo =
3567 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3568}
3569
3570static void bnx2x_emac_stats_update(struct bnx2x *bp)
3571{
3572 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3573 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3574 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3575
3576 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3577 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3578 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3579 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3580 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3581 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3582 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3583 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3584 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3585 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3586 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3587 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3588 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3589 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3590 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3591 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3592 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3593 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3594 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3595 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3596 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3597 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3598 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3599 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3600 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3601 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3602 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3603 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3604 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3605 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3606 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3607
3608 estats->pause_frames_received_hi =
3609 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3610 estats->pause_frames_received_lo =
3611 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3612 ADD_64(estats->pause_frames_received_hi,
3613 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3614 estats->pause_frames_received_lo,
3615 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3616
3617 estats->pause_frames_sent_hi =
3618 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3619 estats->pause_frames_sent_lo =
3620 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3621 ADD_64(estats->pause_frames_sent_hi,
3622 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3623 estats->pause_frames_sent_lo,
3624 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3625}
3626
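/* Refresh the port statistics from whichever MAC is currently active,
 * then fold the NIG counters in as deltas against the snapshot kept in
 * bp->port.old_nig_stats before refreshing that snapshot with memcpy().
 */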
3627static int bnx2x_hw_stats_update(struct bnx2x *bp)
3628{
3629 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3630 struct nig_stats *old = &(bp->port.old_nig_stats);
3631 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3632 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3633 struct regpair diff;
3634 u32 nig_timer_max;
3635
3636 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3637 bnx2x_bmac_stats_update(bp);
3638
3639 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3640 bnx2x_emac_stats_update(bp);
3641
3642 else { /* unreached */
3643 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3644 return -1;
3645 }
3646
3647 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3648 new->brb_discard - old->brb_discard);
3649 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3650 new->brb_truncate - old->brb_truncate);
3651
3652 UPDATE_STAT64_NIG(egress_mac_pkt0,
3653 etherstatspkts1024octetsto1522octets);
3654 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3655
3656 memcpy(old, new, sizeof(struct nig_stats));
3657
3658 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3659 sizeof(struct mac_stx));
3660 estats->brb_drop_hi = pstats->brb_drop_hi;
3661 estats->brb_drop_lo = pstats->brb_drop_lo;
3662
3663 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3664
3665 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3666 if (nig_timer_max != estats->nig_timer_max) {
3667 estats->nig_timer_max = nig_timer_max;
3668 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3669 }
3670
3671 return 0;
3672}
3673
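/* Per-queue statistics are delivered by the t/u/x storm processors into
 * the fw_stats query buffer.  Each storm tags its block with a
 * stats_counter; a block is fresh only when counter + 1 matches
 * bp->stats_counter, otherwise the update aborts with a distinct error
 * code (-1/-2/-4) identifying the storm that lagged behind.
 */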
3674 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3675{
3676 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3677 struct tstorm_per_port_stats *tport =
3678 &stats->tstorm_common.port_statistics;
3679 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3680 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3681 int i;
3682
3683 memset(&(fstats->total_bytes_received_hi), 0,
3684 sizeof(struct host_func_stats) - 2*sizeof(u32));
3685 estats->error_bytes_received_hi = 0;
3686 estats->error_bytes_received_lo = 0;
3687 estats->etherstatsoverrsizepkts_hi = 0;
3688 estats->etherstatsoverrsizepkts_lo = 0;
3689 estats->no_buff_discard_hi = 0;
3690 estats->no_buff_discard_lo = 0;
3691
3692 for_each_queue(bp, i) {
3693 struct bnx2x_fastpath *fp = &bp->fp[i];
3694 int cl_id = fp->cl_id;
3695 struct tstorm_per_client_stats *tclient =
3696 &stats->tstorm_common.client_statistics[cl_id];
3697 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3698 struct ustorm_per_client_stats *uclient =
3699 &stats->ustorm_common.client_statistics[cl_id];
3700 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3701 struct xstorm_per_client_stats *xclient =
3702 &stats->xstorm_common.client_statistics[cl_id];
3703 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3704 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3705 u32 diff;
3706
3707 /* are storm stats valid? */
3708 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3709 bp->stats_counter) {
3710 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3711 " xstorm counter (%d) != stats_counter (%d)\n",
3712 i, xclient->stats_counter, bp->stats_counter);
3713 return -1;
3714 }
3715 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3716 bp->stats_counter) {
3717 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3718 " tstorm counter (%d) != stats_counter (%d)\n",
3719 i, tclient->stats_counter, bp->stats_counter);
3720 return -2;
3721 }
3722 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3723 bp->stats_counter) {
3724 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3725 " ustorm counter (%d) != stats_counter (%d)\n",
3726 i, uclient->stats_counter, bp->stats_counter);
3727 return -4;
3728 }
3729
3730 qstats->total_bytes_received_hi =
3731 qstats->valid_bytes_received_hi =
3732 le32_to_cpu(tclient->total_rcv_bytes.hi);
3733 qstats->total_bytes_received_lo =
3734 qstats->valid_bytes_received_lo =
3735 le32_to_cpu(tclient->total_rcv_bytes.lo);
3736
3737 qstats->error_bytes_received_hi =
3738 le32_to_cpu(tclient->rcv_error_bytes.hi);
3739 qstats->error_bytes_received_lo =
3740 le32_to_cpu(tclient->rcv_error_bytes.lo);
3741
3742 ADD_64(qstats->total_bytes_received_hi,
3743 qstats->error_bytes_received_hi,
3744 qstats->total_bytes_received_lo,
3745 qstats->error_bytes_received_lo);
3746
3747 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3748 total_unicast_packets_received);
3749 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3750 total_multicast_packets_received);
3751 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3752 total_broadcast_packets_received);
3753 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3754 etherstatsoverrsizepkts);
3755 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3756
3757 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3758 total_unicast_packets_received);
3759 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3760 total_multicast_packets_received);
3761 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3762 total_broadcast_packets_received);
3763 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3764 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3765 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3766
3767 qstats->total_bytes_transmitted_hi =
3768 le32_to_cpu(xclient->total_sent_bytes.hi);
3769 qstats->total_bytes_transmitted_lo =
3770 le32_to_cpu(xclient->total_sent_bytes.lo);
3771
3772 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3773 total_unicast_packets_transmitted);
3774 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3775 total_multicast_packets_transmitted);
3776 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3777 total_broadcast_packets_transmitted);
3778
3779 old_tclient->checksum_discard = tclient->checksum_discard;
3780 old_tclient->ttl0_discard = tclient->ttl0_discard;
3781
3782 ADD_64(fstats->total_bytes_received_hi,
3783 qstats->total_bytes_received_hi,
3784 fstats->total_bytes_received_lo,
3785 qstats->total_bytes_received_lo);
3786 ADD_64(fstats->total_bytes_transmitted_hi,
3787 qstats->total_bytes_transmitted_hi,
3788 fstats->total_bytes_transmitted_lo,
3789 qstats->total_bytes_transmitted_lo);
3790 ADD_64(fstats->total_unicast_packets_received_hi,
3791 qstats->total_unicast_packets_received_hi,
3792 fstats->total_unicast_packets_received_lo,
3793 qstats->total_unicast_packets_received_lo);
3794 ADD_64(fstats->total_multicast_packets_received_hi,
3795 qstats->total_multicast_packets_received_hi,
3796 fstats->total_multicast_packets_received_lo,
3797 qstats->total_multicast_packets_received_lo);
3798 ADD_64(fstats->total_broadcast_packets_received_hi,
3799 qstats->total_broadcast_packets_received_hi,
3800 fstats->total_broadcast_packets_received_lo,
3801 qstats->total_broadcast_packets_received_lo);
3802 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3803 qstats->total_unicast_packets_transmitted_hi,
3804 fstats->total_unicast_packets_transmitted_lo,
3805 qstats->total_unicast_packets_transmitted_lo);
3806 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3807 qstats->total_multicast_packets_transmitted_hi,
3808 fstats->total_multicast_packets_transmitted_lo,
3809 qstats->total_multicast_packets_transmitted_lo);
3810 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3811 qstats->total_broadcast_packets_transmitted_hi,
3812 fstats->total_broadcast_packets_transmitted_lo,
3813 qstats->total_broadcast_packets_transmitted_lo);
3814 ADD_64(fstats->valid_bytes_received_hi,
3815 qstats->valid_bytes_received_hi,
3816 fstats->valid_bytes_received_lo,
3817 qstats->valid_bytes_received_lo);
3818
3819 ADD_64(estats->error_bytes_received_hi,
3820 qstats->error_bytes_received_hi,
3821 estats->error_bytes_received_lo,
3822 qstats->error_bytes_received_lo);
3823 ADD_64(estats->etherstatsoverrsizepkts_hi,
3824 qstats->etherstatsoverrsizepkts_hi,
3825 estats->etherstatsoverrsizepkts_lo,
3826 qstats->etherstatsoverrsizepkts_lo);
3827 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3828 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3829 }
3830
3831 ADD_64(fstats->total_bytes_received_hi,
3832 estats->rx_stat_ifhcinbadoctets_hi,
3833 fstats->total_bytes_received_lo,
3834 estats->rx_stat_ifhcinbadoctets_lo);
3835
3836 memcpy(estats, &(fstats->total_bytes_received_hi),
3837 sizeof(struct host_func_stats) - 2*sizeof(u32));
3838
3839 ADD_64(estats->etherstatsoverrsizepkts_hi,
3840 estats->rx_stat_dot3statsframestoolong_hi,
3841 estats->etherstatsoverrsizepkts_lo,
3842 estats->rx_stat_dot3statsframestoolong_lo);
3843 ADD_64(estats->error_bytes_received_hi,
3844 estats->rx_stat_ifhcinbadoctets_hi,
3845 estats->error_bytes_received_lo,
3846 estats->rx_stat_ifhcinbadoctets_lo);
3847
3848 if (bp->port.pmf) {
3849 estats->mac_filter_discard =
3850 le32_to_cpu(tport->mac_filter_discard);
3851 estats->xxoverflow_discard =
3852 le32_to_cpu(tport->xxoverflow_discard);
3853 estats->brb_truncate_discard =
3854 le32_to_cpu(tport->brb_truncate_discard);
3855 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3856 }
3857
3858 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3859
3860 bp->stats_pending = 0;
3861
3862 return 0;
3863}
3864
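/* Fold the 64-bit hi/lo driver counters into the generic
 * net_device_stats fields reported to the stack; bnx2x_hilo() collapses
 * a hi/lo counter pair into a single scalar.
 */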
3865 static void bnx2x_net_stats_update(struct bnx2x *bp)
3866 {
3867 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3868 struct net_device_stats *nstats = &bp->dev->stats;
3869 int i;
3870
3871 nstats->rx_packets =
3872 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3873 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3874 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3875
3876 nstats->tx_packets =
3877 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3878 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3879 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3880
3881 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3882
3883 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3884
3885 nstats->rx_dropped = estats->mac_discard;
3886 for_each_queue(bp, i)
3887 nstats->rx_dropped +=
3888 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3889
3890 nstats->tx_dropped = 0;
3891
3892 nstats->multicast =
3893 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3894
3895 nstats->collisions =
3896 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3897
3898 nstats->rx_length_errors =
3899 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3900 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3901 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3902 bnx2x_hilo(&estats->brb_truncate_hi);
3903 nstats->rx_crc_errors =
3904 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3905 nstats->rx_frame_errors =
3906 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3907 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3908 nstats->rx_missed_errors = estats->xxoverflow_discard;
3909
3910 nstats->rx_errors = nstats->rx_length_errors +
3911 nstats->rx_over_errors +
3912 nstats->rx_crc_errors +
3913 nstats->rx_frame_errors +
3914 nstats->rx_fifo_errors +
3915 nstats->rx_missed_errors;
3916
3917 nstats->tx_aborted_errors =
3918 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3919 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3920 nstats->tx_carrier_errors =
3921 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3922 nstats->tx_fifo_errors = 0;
3923 nstats->tx_heartbeat_errors = 0;
3924 nstats->tx_window_errors = 0;
3925
3926 nstats->tx_errors = nstats->tx_aborted_errors +
3927 nstats->tx_carrier_errors +
3928 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3929}
3930
3931static void bnx2x_drv_stats_update(struct bnx2x *bp)
3932{
3933 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934 int i;
3935
3936 estats->driver_xoff = 0;
3937 estats->rx_err_discard_pkt = 0;
3938 estats->rx_skb_alloc_failed = 0;
3939 estats->hw_csum_err = 0;
3940 for_each_queue(bp, i) {
3941 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3942
3943 estats->driver_xoff += qstats->driver_xoff;
3944 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3945 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3946 estats->hw_csum_err += qstats->hw_csum_err;
3947 }
3948}
3949
3950 static void bnx2x_stats_update(struct bnx2x *bp)
3951 {
3952 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3953
3954 if (*stats_comp != DMAE_COMP_VAL)
3955 return;
3956
3957 if (bp->port.pmf)
3958 bnx2x_hw_stats_update(bp);
3959
3960 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3961 BNX2X_ERR("storm stats were not updated for 3 times\n");
3962 bnx2x_panic();
3963 return;
3964 }
3965
3966 bnx2x_net_stats_update(bp);
3967 bnx2x_drv_stats_update(bp);
3968
3969 if (bp->msglevel & NETIF_MSG_TIMER) {
3970 struct tstorm_per_client_stats *old_tclient =
3971 &bp->fp->old_tclient;
3972 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3973 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3974 struct net_device_stats *nstats = &bp->dev->stats;
3975 int i;
3976
3977 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3978 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3979 " tx pkt (%lx)\n",
3980 bnx2x_tx_avail(bp->fp),
3981 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3982 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3983 " rx pkt (%lx)\n",
3984 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3985 bp->fp->rx_comp_cons),
3986 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3987 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3988 "brb truncate %u\n",
3989 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3990 qstats->driver_xoff,
3991 estats->brb_drop_lo, estats->brb_truncate_lo);
3992 printk(KERN_DEBUG "tstats: checksum_discard %u "
3993 "packets_too_big_discard %lu no_buff_discard %lu "
3994 "mac_discard %u mac_filter_discard %u "
3995 "xxovrflow_discard %u brb_truncate_discard %u "
3996 "ttl0_discard %u\n",
3997 old_tclient->checksum_discard,
3998 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3999 bnx2x_hilo(&qstats->no_buff_discard_hi),
4000 estats->mac_discard, estats->mac_filter_discard,
4001 estats->xxoverflow_discard, estats->brb_truncate_discard,
4002 old_tclient->ttl0_discard);
4003
4004 for_each_queue(bp, i) {
4005 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4006 bnx2x_fp(bp, i, tx_pkt),
4007 bnx2x_fp(bp, i, rx_pkt),
4008 bnx2x_fp(bp, i, rx_calls));
4009 }
4010 }
4011
4012 bnx2x_hw_stats_post(bp);
4013 bnx2x_storm_stats_post(bp);
4014}
4015
4016static void bnx2x_port_stats_stop(struct bnx2x *bp)
4017{
4018 struct dmae_command *dmae;
4019 u32 opcode;
4020 int loader_idx = PMF_DMAE_C(bp);
4021 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4022
4023 bp->executer_idx = 0;
4024
4025 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4026 DMAE_CMD_C_ENABLE |
4027 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4028 #ifdef __BIG_ENDIAN
4029 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4030 #else
4031 DMAE_CMD_ENDIANITY_DW_SWAP |
4032 #endif
4033 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4034 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4035
4036 if (bp->port.port_stx) {
4037
4038 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4039 if (bp->func_stx)
4040 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4041 else
4042 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4043 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4044 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4045 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4046 dmae->dst_addr_hi = 0;
4047 dmae->len = sizeof(struct host_port_stats) >> 2;
4048 if (bp->func_stx) {
4049 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4050 dmae->comp_addr_hi = 0;
4051 dmae->comp_val = 1;
4052 } else {
4053 dmae->comp_addr_lo =
4054 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4055 dmae->comp_addr_hi =
4056 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4057 dmae->comp_val = DMAE_COMP_VAL;
4058
4059 *stats_comp = 0;
4060 }
4061 }
4062
4063 if (bp->func_stx) {
4064
4065 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4067 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4068 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4069 dmae->dst_addr_lo = bp->func_stx >> 2;
4070 dmae->dst_addr_hi = 0;
4071 dmae->len = sizeof(struct host_func_stats) >> 2;
4072 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4073 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4074 dmae->comp_val = DMAE_COMP_VAL;
4075
4076 *stats_comp = 0;
4077 }
4078}
4079
4080static void bnx2x_stats_stop(struct bnx2x *bp)
4081{
4082 int update = 0;
4083
4084 bnx2x_stats_comp(bp);
4085
4086 if (bp->port.pmf)
4087 update = (bnx2x_hw_stats_update(bp) == 0);
4088
4089 update |= (bnx2x_storm_stats_update(bp) == 0);
4090
4091 if (update) {
4092 bnx2x_net_stats_update(bp);
4093
4094 if (bp->port.pmf)
4095 bnx2x_port_stats_stop(bp);
4096
4097 bnx2x_hw_stats_post(bp);
4098 bnx2x_stats_comp(bp);
4099 }
4100}
4101
4102static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4103{
4104}
4105
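/* Statistics state machine, indexed by [current state][event]: rows are
 * STATS_STATE_DISABLED and STATS_STATE_ENABLED, columns follow the event
 * order PMF, LINK_UP, UPDATE, STOP.  Each entry names the action to run
 * and the next state to enter.
 */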
4106static const struct {
4107 void (*action)(struct bnx2x *bp);
4108 enum bnx2x_stats_state next_state;
4109} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4110/* state event */
4111{
4112/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4113/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4114/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4115/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4116},
4117{
4118/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4119/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4120/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4121/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4122}
4123};
4124
4125static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4126{
4127 enum bnx2x_stats_state state = bp->stats_state;
4128
4129 bnx2x_stats_stm[state][event].action(bp);
4130 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4131
4132 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4133 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4134 state, event, bp->stats_state);
4135}
4136
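/* Periodic driver timer: runs the poll-mode Tx/Rx loops if enabled,
 * exchanges the driver/MCP heartbeat pulse, kicks STATS_EVENT_UPDATE
 * while the device is up, and re-arms itself every bp->current_interval
 * jiffies.
 */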
4137static void bnx2x_timer(unsigned long data)
4138{
4139 struct bnx2x *bp = (struct bnx2x *) data;
4140
4141 if (!netif_running(bp->dev))
4142 return;
4143
4144 if (atomic_read(&bp->intr_sem) != 0)
4145 goto timer_restart;
4146
4147 if (poll) {
4148 struct bnx2x_fastpath *fp = &bp->fp[0];
4149 int rc;
4150
4151 bnx2x_tx_int(fp, 1000);
4152 rc = bnx2x_rx_int(fp, 1000);
4153 }
4154
4155 if (!BP_NOMCP(bp)) {
4156 int func = BP_FUNC(bp);
4157 u32 drv_pulse;
4158 u32 mcp_pulse;
4159
4160 ++bp->fw_drv_pulse_wr_seq;
4161 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4162 /* TBD - add SYSTEM_TIME */
4163 drv_pulse = bp->fw_drv_pulse_wr_seq;
4164 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4165
4166 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4167 MCP_PULSE_SEQ_MASK);
4168 /* The delta between driver pulse and mcp response
4169 * should be 1 (before mcp response) or 0 (after mcp response)
4170 */
4171 if ((drv_pulse != mcp_pulse) &&
4172 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4173 /* someone lost a heartbeat... */
4174 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4175 drv_pulse, mcp_pulse);
4176 }
4177 }
4178
4179 if ((bp->state == BNX2X_STATE_OPEN) ||
4180 (bp->state == BNX2X_STATE_DISABLED))
4181 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4182
4183 timer_restart:
4184 mod_timer(&bp->timer, jiffies + bp->current_interval);
4185}
4186
4187/* end of Statistics */
4188
4189/* nic init */
4190
4191/*
4192 * nic init service functions
4193 */
4194
4195 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4196 {
4197 int port = BP_PORT(bp);
4198
4199 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4200 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4201 sizeof(struct ustorm_status_block)/4);
4202 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4203 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4204 sizeof(struct cstorm_status_block)/4);
4205}
4206
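/* Initialize one per-queue (non-default) host status block: publish its
 * DMA address to the USTORM and CSTORM sections, then write 1 to every
 * HC_DISABLE entry so coalescing stays off for each index until
 * bnx2x_update_coalesce() programs the timeouts.
 */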
4207static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4208 dma_addr_t mapping, int sb_id)
4209{
4210 int port = BP_PORT(bp);
4211 int func = BP_FUNC(bp);
4212 int index;
4213 u64 section;
4214
4215 /* USTORM */
4216 section = ((u64)mapping) + offsetof(struct host_status_block,
4217 u_status_block);
4218 sb->u_status_block.status_block_id = sb_id;
4219
4220 REG_WR(bp, BAR_USTRORM_INTMEM +
4221 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4222 REG_WR(bp, BAR_USTRORM_INTMEM +
4223 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4224 U64_HI(section));
4225 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4226 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4227
4228 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4229 REG_WR16(bp, BAR_USTRORM_INTMEM +
4230 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4231
4232 /* CSTORM */
4233 section = ((u64)mapping) + offsetof(struct host_status_block,
4234 c_status_block);
4235 sb->c_status_block.status_block_id = sb_id;
4236
4237 REG_WR(bp, BAR_CSTRORM_INTMEM +
4238 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4239 REG_WR(bp, BAR_CSTRORM_INTMEM +
4240 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4241 U64_HI(section));
4242 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4243 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4244
4245 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4246 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4247 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4248
4249 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4250}
4251
4252static void bnx2x_zero_def_sb(struct bnx2x *bp)
4253{
4254 int func = BP_FUNC(bp);
4255
4256 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4257 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4258 sizeof(struct ustorm_def_status_block)/4);
4259 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4260 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4261 sizeof(struct cstorm_def_status_block)/4);
4262 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4263 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4264 sizeof(struct xstorm_def_status_block)/4);
4265 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4266 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4267 sizeof(struct tstorm_def_status_block)/4);
4268}
4269
4270static void bnx2x_init_def_sb(struct bnx2x *bp,
4271 struct host_def_status_block *def_sb,
4272 dma_addr_t mapping, int sb_id)
4273 {
4274 int port = BP_PORT(bp);
4275 int func = BP_FUNC(bp);
4276 int index, val, reg_offset;
4277 u64 section;
4278
4279 /* ATTN */
4280 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4281 atten_status_block);
4282 def_sb->atten_status_block.status_block_id = sb_id;
4283
4284 bp->attn_state = 0;
4285
4286 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4287 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4288
4289 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4290 bp->attn_group[index].sig[0] = REG_RD(bp,
4291 reg_offset + 0x10*index);
4292 bp->attn_group[index].sig[1] = REG_RD(bp,
4293 reg_offset + 0x4 + 0x10*index);
4294 bp->attn_group[index].sig[2] = REG_RD(bp,
4295 reg_offset + 0x8 + 0x10*index);
4296 bp->attn_group[index].sig[3] = REG_RD(bp,
4297 reg_offset + 0xc + 0x10*index);
4298 }
4299
4300 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4301 HC_REG_ATTN_MSG0_ADDR_L);
4302
4303 REG_WR(bp, reg_offset, U64_LO(section));
4304 REG_WR(bp, reg_offset + 4, U64_HI(section));
4305
4306 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4307
4308 val = REG_RD(bp, reg_offset);
4309 val |= sb_id;
4310 REG_WR(bp, reg_offset, val);
4311
4312 /* USTORM */
4313 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4314 u_def_status_block);
4315 def_sb->u_def_status_block.status_block_id = sb_id;
4316
4317 REG_WR(bp, BAR_USTRORM_INTMEM +
4318 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4319 REG_WR(bp, BAR_USTRORM_INTMEM +
4320 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4321 U64_HI(section));
4322 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4323 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4324
4325 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4326 REG_WR16(bp, BAR_USTRORM_INTMEM +
4327 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4328
4329 /* CSTORM */
4330 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4331 c_def_status_block);
4332 def_sb->c_def_status_block.status_block_id = sb_id;
4333
4334 REG_WR(bp, BAR_CSTRORM_INTMEM +
4335 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4336 REG_WR(bp, BAR_CSTRORM_INTMEM +
4337 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4338 U64_HI(section));
4339 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4340 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4341
4342 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4343 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4344 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4345
4346 /* TSTORM */
4347 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4348 t_def_status_block);
4349 def_sb->t_def_status_block.status_block_id = sb_id;
4350
4351 REG_WR(bp, BAR_TSTRORM_INTMEM +
4352 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4353 REG_WR(bp, BAR_TSTRORM_INTMEM +
4354 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4355 U64_HI(section));
4356 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4357 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4358
4359 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4360 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4361 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4362
4363 /* XSTORM */
4364 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4365 x_def_status_block);
4366 def_sb->x_def_status_block.status_block_id = sb_id;
4367
4368 REG_WR(bp, BAR_XSTRORM_INTMEM +
4369 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4370 REG_WR(bp, BAR_XSTRORM_INTMEM +
4371 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4372 U64_HI(section));
4373 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4374 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4375
4376 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4377 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4378 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4379
4380 bp->stats_pending = 0;
4381 bp->set_mac_pending = 0;
4382
4383 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4384}
4385
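/* Program the status-block coalescing timeouts for the Rx and Tx
 * completion-queue indices.  The timeout registers appear to count in
 * 12-usec units, hence the rx_ticks/12 and tx_ticks/12 conversions; a
 * tick value of 0 leaves host coalescing disabled for that index.
 */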
4386static void bnx2x_update_coalesce(struct bnx2x *bp)
4387{
4388 int port = BP_PORT(bp);
4389 int i;
4390
4391 for_each_queue(bp, i) {
4392 int sb_id = bp->fp[i].sb_id;
4393
4394 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4395 REG_WR8(bp, BAR_USTRORM_INTMEM +
4396 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4397 U_SB_ETH_RX_CQ_INDEX),
4398 bp->rx_ticks/12);
4399 REG_WR16(bp, BAR_USTRORM_INTMEM +
4400 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4401 U_SB_ETH_RX_CQ_INDEX),
4402 bp->rx_ticks ? 0 : 1);
4403
4404 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4405 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4406 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4407 C_SB_ETH_TX_CQ_INDEX),
4408 bp->tx_ticks/12);
4409 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4410 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4411 C_SB_ETH_TX_CQ_INDEX),
4412 bp->tx_ticks ? 0 : 1);
4413 }
4414}
4415
4416static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4417 struct bnx2x_fastpath *fp, int last)
4418{
4419 int i;
4420
4421 for (i = 0; i < last; i++) {
4422 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4423 struct sk_buff *skb = rx_buf->skb;
4424
4425 if (skb == NULL) {
4426 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4427 continue;
4428 }
4429
4430 if (fp->tpa_state[i] == BNX2X_TPA_START)
4431 pci_unmap_single(bp->pdev,
4432 pci_unmap_addr(rx_buf, mapping),
4433 bp->rx_buf_size,
4434 PCI_DMA_FROMDEVICE);
4435
4436 dev_kfree_skb(skb);
4437 rx_buf->skb = NULL;
4438 }
4439}
4440
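/* Build the Rx rings.  In each BD/SGE page the element written at index
 * "count * i - 2" (or "- 1" for the RCQ) holds the DMA address of the
 * following page, chaining the pages into a circular ring before the
 * data buffers themselves are allocated.
 */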
4441static void bnx2x_init_rx_rings(struct bnx2x *bp)
4442{
4443 int func = BP_FUNC(bp);
4444 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4445 ETH_MAX_AGGREGATION_QUEUES_E1H;
4446 u16 ring_prod, cqe_ring_prod;
4447 int i, j;
4448
4449 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4450 DP(NETIF_MSG_IFUP,
4451 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4452
4453 if (bp->flags & TPA_ENABLE_FLAG) {
4454
4455 for_each_rx_queue(bp, j) {
4456 struct bnx2x_fastpath *fp = &bp->fp[j];
4457
4458 for (i = 0; i < max_agg_queues; i++) {
4459 fp->tpa_pool[i].skb =
4460 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4461 if (!fp->tpa_pool[i].skb) {
4462 BNX2X_ERR("Failed to allocate TPA "
4463 "skb pool for queue[%d] - "
4464 "disabling TPA on this "
4465 "queue!\n", j);
4466 bnx2x_free_tpa_pool(bp, fp, i);
4467 fp->disable_tpa = 1;
4468 break;
4469 }
4470 pci_unmap_addr_set((struct sw_rx_bd *)
4471 &bp->fp->tpa_pool[i],
4472 mapping, 0);
4473 fp->tpa_state[i] = BNX2X_TPA_STOP;
4474 }
4475 }
4476 }
4477
4478 for_each_rx_queue(bp, j) {
4479 struct bnx2x_fastpath *fp = &bp->fp[j];
4480
4481 fp->rx_bd_cons = 0;
4482 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4483 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4484
4485 /* "next page" elements initialization */
4486 /* SGE ring */
4487 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4488 struct eth_rx_sge *sge;
4489
4490 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4491 sge->addr_hi =
4492 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4493 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4494 sge->addr_lo =
4495 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4496 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4497 }
4498
4499 bnx2x_init_sge_ring_bit_mask(fp);
4500
4501 /* RX BD ring */
4502 for (i = 1; i <= NUM_RX_RINGS; i++) {
4503 struct eth_rx_bd *rx_bd;
4504
4505 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4506 rx_bd->addr_hi =
4507 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4508 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4509 rx_bd->addr_lo =
4510 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4511 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4512 }
4513
4514 /* CQ ring */
4515 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4516 struct eth_rx_cqe_next_page *nextpg;
4517
4518 nextpg = (struct eth_rx_cqe_next_page *)
4519 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4520 nextpg->addr_hi =
4521 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4522 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4523 nextpg->addr_lo =
4524 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4525 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4526 }
4527
4528 /* Allocate SGEs and initialize the ring elements */
4529 for (i = 0, ring_prod = 0;
4530 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4531
4532 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4533 BNX2X_ERR("was only able to allocate "
4534 "%d rx sges\n", i);
4535 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4536 /* Cleanup already allocated elements */
4537 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4538 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4539 fp->disable_tpa = 1;
4540 ring_prod = 0;
4541 break;
4542 }
4543 ring_prod = NEXT_SGE_IDX(ring_prod);
4544 }
4545 fp->rx_sge_prod = ring_prod;
4546
4547 /* Allocate BDs and initialize BD ring */
4548 fp->rx_comp_cons = 0;
4549 cqe_ring_prod = ring_prod = 0;
4550 for (i = 0; i < bp->rx_ring_size; i++) {
4551 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4552 BNX2X_ERR("was only able to allocate "
4553 "%d rx skbs on queue[%d]\n", i, j);
4554 fp->eth_q_stats.rx_skb_alloc_failed++;
4555 break;
4556 }
4557 ring_prod = NEXT_RX_IDX(ring_prod);
4558 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4559 WARN_ON(ring_prod <= i);
4560 }
4561
4562 fp->rx_bd_prod = ring_prod;
4563 /* must not have more available CQEs than BDs */
4564 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4565 cqe_ring_prod);
4566 fp->rx_pkt = fp->rx_calls = 0;
4567
4568 /* Warning!
4569 * this will generate an interrupt (to the TSTORM);
4570 * it must only be done after the chip is initialized
4571 */
4572 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4573 fp->rx_sge_prod);
4574 if (j != 0)
4575 continue;
4576
4577 REG_WR(bp, BAR_USTRORM_INTMEM +
4578 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4579 U64_LO(fp->rx_comp_mapping));
4580 REG_WR(bp, BAR_USTRORM_INTMEM +
4581 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4582 U64_HI(fp->rx_comp_mapping));
4583 }
4584}
4585
4586static void bnx2x_init_tx_ring(struct bnx2x *bp)
4587{
4588 int i, j;
4589
4590 for_each_tx_queue(bp, j) {
4591 struct bnx2x_fastpath *fp = &bp->fp[j];
4592
4593 for (i = 1; i <= NUM_TX_RINGS; i++) {
4594 struct eth_tx_bd *tx_bd =
4595 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4596
4597 tx_bd->addr_hi =
4598 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4599 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4600 tx_bd->addr_lo =
4601 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4602 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4603 }
4604
4605 fp->tx_pkt_prod = 0;
4606 fp->tx_pkt_cons = 0;
4607 fp->tx_bd_prod = 0;
4608 fp->tx_bd_cons = 0;
4609 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4610 fp->tx_pkt = 0;
4611 }
4612}
4613
4614static void bnx2x_init_sp_ring(struct bnx2x *bp)
4615{
4616 int func = BP_FUNC(bp);
4617
4618 spin_lock_init(&bp->spq_lock);
4619
4620 bp->spq_left = MAX_SPQ_PENDING;
4621 bp->spq_prod_idx = 0;
4622 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4623 bp->spq_prod_bd = bp->spq;
4624 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4625
4626 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4627 U64_LO(bp->spq_mapping));
4628 REG_WR(bp,
4629 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4630 U64_HI(bp->spq_mapping));
4631
4632 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4633 bp->spq_prod_idx);
4634}
4635
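/* Program the per-connection ETH context: the USTORM section receives
 * the Rx BD/SGE ring bases and buffer sizes, the XSTORM section the Tx
 * BD ring base and doorbell data address, and the CSTORM section the Tx
 * completion index and status block id.
 */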
4636static void bnx2x_init_context(struct bnx2x *bp)
4637{
4638 int i;
4639
4640 for_each_queue(bp, i) {
4641 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4642 struct bnx2x_fastpath *fp = &bp->fp[i];
4643 u8 cl_id = fp->cl_id;
4644 u8 sb_id = FP_SB_ID(fp);
4645
4646 context->ustorm_st_context.common.sb_index_numbers =
4647 BNX2X_RX_SB_INDEX_NUM;
4648 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4649 context->ustorm_st_context.common.status_block_id = sb_id;
4650 context->ustorm_st_context.common.flags =
4651 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4652 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4653 context->ustorm_st_context.common.statistics_counter_id =
4654 cl_id;
4655 context->ustorm_st_context.common.mc_alignment_log_size =
4656 BNX2X_RX_ALIGN_SHIFT;
4657 context->ustorm_st_context.common.bd_buff_size =
4658 bp->rx_buf_size;
4659 context->ustorm_st_context.common.bd_page_base_hi =
4660 U64_HI(fp->rx_desc_mapping);
4661 context->ustorm_st_context.common.bd_page_base_lo =
4662 U64_LO(fp->rx_desc_mapping);
4663 if (!fp->disable_tpa) {
4664 context->ustorm_st_context.common.flags |=
4665 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4666 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4667 context->ustorm_st_context.common.sge_buff_size =
4668 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4669 (u32)0xffff);
4670 context->ustorm_st_context.common.sge_page_base_hi =
4671 U64_HI(fp->rx_sge_mapping);
4672 context->ustorm_st_context.common.sge_page_base_lo =
4673 U64_LO(fp->rx_sge_mapping);
4674 }
4675
4676 context->ustorm_ag_context.cdu_usage =
4677 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4678 CDU_REGION_NUMBER_UCM_AG,
4679 ETH_CONNECTION_TYPE);
4680
4681 context->xstorm_st_context.tx_bd_page_base_hi =
4682 U64_HI(fp->tx_desc_mapping);
4683 context->xstorm_st_context.tx_bd_page_base_lo =
4684 U64_LO(fp->tx_desc_mapping);
4685 context->xstorm_st_context.db_data_addr_hi =
4686 U64_HI(fp->tx_prods_mapping);
4687 context->xstorm_st_context.db_data_addr_lo =
4688 U64_LO(fp->tx_prods_mapping);
4689 context->xstorm_st_context.statistics_data = (fp->cl_id |
4690 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4691 context->cstorm_st_context.sb_index_number =
4692 C_SB_ETH_TX_CQ_INDEX;
4693 context->cstorm_st_context.status_block_id = sb_id;
4694
4695 context->xstorm_ag_context.cdu_reserved =
4696 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4697 CDU_REGION_NUMBER_XCM_AG,
4698 ETH_CONNECTION_TYPE);
4699 }
4700}
4701
4702static void bnx2x_init_ind_table(struct bnx2x *bp)
4703{
4704 int func = BP_FUNC(bp);
4705 int i;
4706
4707 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4708 return;
4709
4710 DP(NETIF_MSG_IFUP,
4711 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4712 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4713 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4714 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4715 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4716}
4717
4718static void bnx2x_set_client_config(struct bnx2x *bp)
4719{
4720 struct tstorm_eth_client_config tstorm_client = {0};
4721 int port = BP_PORT(bp);
4722 int i;
4723
4724 tstorm_client.mtu = bp->dev->mtu;
4725 tstorm_client.config_flags =
4726 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4727 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4728 #ifdef BCM_VLAN
4729 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4730 tstorm_client.config_flags |=
4731 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4732 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4733 }
4734#endif
4735
4736 if (bp->flags & TPA_ENABLE_FLAG) {
4737 tstorm_client.max_sges_for_packet =
4738 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4739 tstorm_client.max_sges_for_packet =
4740 ((tstorm_client.max_sges_for_packet +
4741 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4742 PAGES_PER_SGE_SHIFT;
4743
4744 tstorm_client.config_flags |=
4745 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4746 }
4747
4748 for_each_queue(bp, i) {
4749 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4750
4751 REG_WR(bp, BAR_TSTRORM_INTMEM +
4752 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4753 ((u32 *)&tstorm_client)[0]);
4754 REG_WR(bp, BAR_TSTRORM_INTMEM +
4755 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4756 ((u32 *)&tstorm_client)[1]);
4757 }
4758
4759 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4760 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4761}
4762
4763static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4764{
4765 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4766 int mode = bp->rx_mode;
4767 int mask = (1 << BP_L_ID(bp));
4768 int func = BP_FUNC(bp);
4769 int i;
4770
4771 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4772
4773 switch (mode) {
4774 case BNX2X_RX_MODE_NONE: /* no Rx */
4775 tstorm_mac_filter.ucast_drop_all = mask;
4776 tstorm_mac_filter.mcast_drop_all = mask;
4777 tstorm_mac_filter.bcast_drop_all = mask;
4778 break;
4779 case BNX2X_RX_MODE_NORMAL:
4780 tstorm_mac_filter.bcast_accept_all = mask;
4781 break;
4782 case BNX2X_RX_MODE_ALLMULTI:
4783 tstorm_mac_filter.mcast_accept_all = mask;
4784 tstorm_mac_filter.bcast_accept_all = mask;
4785 break;
4786 case BNX2X_RX_MODE_PROMISC:
4787 tstorm_mac_filter.ucast_accept_all = mask;
4788 tstorm_mac_filter.mcast_accept_all = mask;
4789 tstorm_mac_filter.bcast_accept_all = mask;
4790 break;
4791 default:
4792 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4793 break;
4794 }
4795
4796 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4797 REG_WR(bp, BAR_TSTRORM_INTMEM +
4798 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4799 ((u32 *)&tstorm_mac_filter)[i]);
4800
4801 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4802 ((u32 *)&tstorm_mac_filter)[i]); */
4803 }
4804
4805 if (mode != BNX2X_RX_MODE_NONE)
4806 bnx2x_set_client_config(bp);
4807}
4808
4809static void bnx2x_init_internal_common(struct bnx2x *bp)
4810{
4811 int i;
4812
4813 if (bp->flags & TPA_ENABLE_FLAG) {
4814 struct tstorm_eth_tpa_exist tpa = {0};
4815
4816 tpa.tpa_exist = 1;
4817
4818 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4819 ((u32 *)&tpa)[0]);
4820 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4821 ((u32 *)&tpa)[1]);
4822 }
4823
4824 /* Zero this manually as its initialization is
4825 currently missing in the initTool */
4826 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4827 REG_WR(bp, BAR_USTRORM_INTMEM +
4828 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4829}
4830
4831static void bnx2x_init_internal_port(struct bnx2x *bp)
4832{
4833 int port = BP_PORT(bp);
4834
4835 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4836 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4837 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4838 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4839}
4840
4841 /* Calculates the sum of vn_min_rates.
4842 It's needed for further normalizing of the min_rates.
4843 Returns:
4844 sum of vn_min_rates.
4845 or
4846 0 - if all the min_rates are 0.
4847 In the latter case the fairness algorithm should be deactivated.
4848 If not all min_rates are zero then those that are zeroes will be set to 1.
4849 */
4850static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4851{
4852 int all_zero = 1;
4853 int port = BP_PORT(bp);
4854 int vn;
4855
4856 bp->vn_weight_sum = 0;
4857 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4858 int func = 2*vn + port;
4859 u32 vn_cfg =
4860 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4861 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4862 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4863
4864 /* Skip hidden vns */
4865 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4866 continue;
4867
4868 /* If min rate is zero - set it to 1 */
4869 if (!vn_min_rate)
4870 vn_min_rate = DEF_MIN_RATE;
4871 else
4872 all_zero = 0;
4873
4874 bp->vn_weight_sum += vn_min_rate;
4875 }
4876
4877 /* ... only if all min rates are zeros - disable fairness */
4878 if (all_zero)
4879 bp->vn_weight_sum = 0;
4880}
4881
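/* Per-function internal RAM setup: RSS configuration, an initially
 * closed Rx filter, zeroing of the per-client storm statistics,
 * publication of the fw_stats query address to all four storms, and the
 * CQ base and TPA aggregation-size programming for every Rx queue.
 */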
4882 static void bnx2x_init_internal_func(struct bnx2x *bp)
4883 {
4884 struct tstorm_eth_function_common_config tstorm_config = {0};
4885 struct stats_indication_flags stats_flags = {0};
4886 int port = BP_PORT(bp);
4887 int func = BP_FUNC(bp);
4888 int i, j;
4889 u32 offset;
4890 u16 max_agg_size;
4891
4892 if (is_multi(bp)) {
4893 tstorm_config.config_flags = MULTI_FLAGS(bp);
4894 tstorm_config.rss_result_mask = MULTI_MASK;
4895 }
4896 if (IS_E1HMF(bp))
4897 tstorm_config.config_flags |=
4898 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4899
4900 tstorm_config.leading_client_id = BP_L_ID(bp);
4901
4902 REG_WR(bp, BAR_TSTRORM_INTMEM +
4903 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4904 (*(u32 *)&tstorm_config));
4905
4906 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4907 bnx2x_set_storm_rx_mode(bp);
4908
4909 for_each_queue(bp, i) {
4910 u8 cl_id = bp->fp[i].cl_id;
4911
4912 /* reset xstorm per client statistics */
4913 offset = BAR_XSTRORM_INTMEM +
4914 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4915 for (j = 0;
4916 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4917 REG_WR(bp, offset + j*4, 0);
4918
4919 /* reset tstorm per client statistics */
4920 offset = BAR_TSTRORM_INTMEM +
4921 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4922 for (j = 0;
4923 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4924 REG_WR(bp, offset + j*4, 0);
4925
4926 /* reset ustorm per client statistics */
4927 offset = BAR_USTRORM_INTMEM +
4928 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4929 for (j = 0;
4930 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4931 REG_WR(bp, offset + j*4, 0);
4932 }
4933
4934 /* Init statistics related context */
4935 stats_flags.collect_eth = 1;
4936
4937 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4938 ((u32 *)&stats_flags)[0]);
4939 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4940 ((u32 *)&stats_flags)[1]);
4941
66e855f3 4942 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4943 ((u32 *)&stats_flags)[0]);
66e855f3 4944 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4945 ((u32 *)&stats_flags)[1]);
4946
4947 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4948 ((u32 *)&stats_flags)[0]);
4949 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4950 ((u32 *)&stats_flags)[1]);
4951
66e855f3 4952 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4953 ((u32 *)&stats_flags)[0]);
66e855f3 4954 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4955 ((u32 *)&stats_flags)[1]);
4956
4957 REG_WR(bp, BAR_XSTRORM_INTMEM +
4958 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4959 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4960 REG_WR(bp, BAR_XSTRORM_INTMEM +
4961 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4962 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4963
4964 REG_WR(bp, BAR_TSTRORM_INTMEM +
4965 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4966 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4967 REG_WR(bp, BAR_TSTRORM_INTMEM +
4968 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4969 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4970
4971 REG_WR(bp, BAR_USTRORM_INTMEM +
4972 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4973 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4974 REG_WR(bp, BAR_USTRORM_INTMEM +
4975 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4976 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4977
4978 if (CHIP_IS_E1H(bp)) {
4979 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4980 IS_E1HMF(bp));
4981 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4982 IS_E1HMF(bp));
4983 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4984 IS_E1HMF(bp));
4985 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4986 IS_E1HMF(bp));
4987
4988 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4989 bp->e1hov);
4990 }
4991
4992 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4993 max_agg_size =
4994 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4995 SGE_PAGE_SIZE * PAGES_PER_SGE),
4996 (u32)0xffff);
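/* Worked example (assuming 4K SGE pages and PAGES_PER_SGE == 2; both are
 * build-time constants elsewhere in this driver): the FW limit of 8 frags
 * gives 8 * 4096 * 2 = 64K, which the outer min() then clamps to 0xffff so
 * the value fits the u16 written per client in the loop below.
 */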
555f6c78 4997 for_each_rx_queue(bp, i) {
7a9b2557 4998 struct bnx2x_fastpath *fp = &bp->fp[i];
4999
5000 REG_WR(bp, BAR_USTRORM_INTMEM +
5001 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
5002 U64_LO(fp->rx_comp_mapping));
5003 REG_WR(bp, BAR_USTRORM_INTMEM +
5004 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
5005 U64_HI(fp->rx_comp_mapping));
5006
5007 REG_WR16(bp, BAR_USTRORM_INTMEM +
5008 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5009 max_agg_size);
5010 }
8a1c38d1 5011
5012 /* dropless flow control */
5013 if (CHIP_IS_E1H(bp)) {
5014 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5015
5016 rx_pause.bd_thr_low = 250;
5017 rx_pause.cqe_thr_low = 250;
5018 rx_pause.cos = 1;
5019 rx_pause.sge_thr_low = 0;
5020 rx_pause.bd_thr_high = 350;
5021 rx_pause.cqe_thr_high = 350;
5022 rx_pause.sge_thr_high = 0;
5023
5024 for_each_rx_queue(bp, i) {
5025 struct bnx2x_fastpath *fp = &bp->fp[i];
5026
5027 if (!fp->disable_tpa) {
5028 rx_pause.sge_thr_low = 150;
5029 rx_pause.sge_thr_high = 250;
5030 }
5031
5032
5033 offset = BAR_USTRORM_INTMEM +
5034 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5035 fp->cl_id);
5036 for (j = 0;
5037 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5038 j++)
5039 REG_WR(bp, offset + j*4,
5040 ((u32 *)&rx_pause)[j]);
5041 }
5042 }
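/* A reading of the code above (not a documented contract): the
 * *_thr_low/*_thr_high pairs act as XOFF/XON watermarks in ring-entry
 * units - the FW signals pause when free BDs/CQEs fall below the low
 * threshold and releases it above the high one; the SGE watermarks are
 * only raised when TPA keeps the SGE ring in use.
 */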
5043
5044 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5045
5046 /* Init rate shaping and fairness contexts */
5047 if (IS_E1HMF(bp)) {
5048 int vn;
5049
5050 /* During init there is no active link;
5051 until the link is up, set the link rate to 10Gbps */
5052 bp->link_vars.line_speed = SPEED_10000;
5053 bnx2x_init_port_minmax(bp);
5054
5055 bnx2x_calc_vn_weight_sum(bp);
5056
5057 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5058 bnx2x_init_vn_minmax(bp, 2*vn + port);
5059
5060 /* Enable rate shaping and fairness */
5061 bp->cmng.flags.cmng_enables =
5062 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5063 if (bp->vn_weight_sum)
5064 bp->cmng.flags.cmng_enables |=
5065 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5066 else
5067 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5068 " fairness will be disabled\n");
5069 } else {
5070 /* rate shaping and fairness are disabled */
5071 DP(NETIF_MSG_IFUP,
5072 "single function mode minmax will be disabled\n");
5073 }
5074
5075
5076 /* Store it to internal memory */
5077 if (bp->port.pmf)
5078 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5079 REG_WR(bp, BAR_XSTRORM_INTMEM +
5080 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5081 ((u32 *)(&bp->cmng))[i]);
5082}
5083
5084static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5085{
5086 switch (load_code) {
5087 case FW_MSG_CODE_DRV_LOAD_COMMON:
5088 bnx2x_init_internal_common(bp);
5089 /* no break */
5090
5091 case FW_MSG_CODE_DRV_LOAD_PORT:
5092 bnx2x_init_internal_port(bp);
5093 /* no break */
5094
5095 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5096 bnx2x_init_internal_func(bp);
5097 break;
5098
5099 default:
5100 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5101 break;
5102 }
5103}
5104
5105static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5106{
5107 int i;
5108
5109 for_each_queue(bp, i) {
5110 struct bnx2x_fastpath *fp = &bp->fp[i];
5111
34f80b04 5112 fp->bp = bp;
a2fbb9ea 5113 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5114 fp->index = i;
5115 fp->cl_id = BP_L_ID(bp) + i;
5116 fp->sb_id = fp->cl_id;
5117 DP(NETIF_MSG_IFUP,
5118 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5119 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5120 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5121 FP_SB_ID(fp));
5122 bnx2x_update_fpsb_idx(fp);
5123 }
5124
5125 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5126 DEF_SB_ID);
5127 bnx2x_update_dsb_idx(bp);
5128 bnx2x_update_coalesce(bp);
5129 bnx2x_init_rx_rings(bp);
5130 bnx2x_init_tx_ring(bp);
5131 bnx2x_init_sp_ring(bp);
5132 bnx2x_init_context(bp);
471de716 5133 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5134 bnx2x_init_ind_table(bp);
5135 bnx2x_stats_init(bp);
5136
5137 /* At this point, we are ready for interrupts */
5138 atomic_set(&bp->intr_sem, 0);
5139
5140 /* flush all before enabling interrupts */
5141 mb();
5142 mmiowb();
5143
615f8fd9 5144 bnx2x_int_enable(bp);
5145}
5146
5147/* end of nic init */
5148
5149/*
5150 * gzip service functions
5151 */
5152
5153static int bnx2x_gunzip_init(struct bnx2x *bp)
5154{
5155 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5156 &bp->gunzip_mapping);
5157 if (bp->gunzip_buf == NULL)
5158 goto gunzip_nomem1;
5159
5160 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5161 if (bp->strm == NULL)
5162 goto gunzip_nomem2;
5163
5164 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5165 GFP_KERNEL);
5166 if (bp->strm->workspace == NULL)
5167 goto gunzip_nomem3;
5168
5169 return 0;
5170
5171gunzip_nomem3:
5172 kfree(bp->strm);
5173 bp->strm = NULL;
5174
5175gunzip_nomem2:
5176 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5177 bp->gunzip_mapping);
5178 bp->gunzip_buf = NULL;
5179
5180gunzip_nomem1:
5181 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5182 " decompression\n", bp->dev->name);
5183 return -ENOMEM;
5184}
5185
5186static void bnx2x_gunzip_end(struct bnx2x *bp)
5187{
5188 kfree(bp->strm->workspace);
5189
5190 kfree(bp->strm);
5191 bp->strm = NULL;
5192
5193 if (bp->gunzip_buf) {
5194 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5195 bp->gunzip_mapping);
5196 bp->gunzip_buf = NULL;
5197 }
5198}
5199
5200static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5201{
5202 int n, rc;
5203
5204 /* check gzip header */
5205 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5206 return -EINVAL;
5207
5208 n = 10;
5209
34f80b04 5210#define FNAME 0x8
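/* gzip header layout per RFC 1952: bytes 0-1 are the magic 0x1f 0x8b,
 * byte 2 the method (8 == deflate), byte 3 the flags, then 4 bytes of
 * mtime plus XFL and OS - 10 bytes total, which is why n starts at 10.
 * If the FNAME flag is set, a NUL-terminated file name follows and the
 * loop below skips it before handing the raw stream to zlib_inflate().
 */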
5211
5212 if (zbuf[3] & FNAME)
5213 while ((zbuf[n++] != 0) && (n < len));
5214
5215 bp->strm->next_in = zbuf + n;
5216 bp->strm->avail_in = len - n;
5217 bp->strm->next_out = bp->gunzip_buf;
5218 bp->strm->avail_out = FW_BUF_SIZE;
5219
5220 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5221 if (rc != Z_OK)
5222 return rc;
5223
5224 rc = zlib_inflate(bp->strm, Z_FINISH);
5225 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5226 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5227 bp->dev->name, bp->strm->msg);
5228
5229 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5230 if (bp->gunzip_outlen & 0x3)
5231 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5232 " gunzip_outlen (%d) not aligned\n",
5233 bp->dev->name, bp->gunzip_outlen);
5234 bp->gunzip_outlen >>= 2;
5235
5236 zlib_inflateEnd(bp->strm);
5237
5238 if (rc == Z_STREAM_END)
5239 return 0;
5240
5241 return rc;
5242}
5243
5244/* nic load/unload */
5245
5246/*
34f80b04 5247 * General service functions
a2fbb9ea
ET
5248 */
5249
5250/* send a NIG loopback debug packet */
5251static void bnx2x_lb_pckt(struct bnx2x *bp)
5252{
a2fbb9ea 5253 u32 wb_write[3];
5254
5255 /* Ethernet source and destination addresses */
5256 wb_write[0] = 0x55555555;
5257 wb_write[1] = 0x55555555;
34f80b04 5258 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5259 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5260
5261 /* NON-IP protocol */
5262 wb_write[0] = 0x09000000;
5263 wb_write[1] = 0x55555555;
34f80b04 5264 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5265 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5266}
5267
5268/* some of the internal memories
5269 * are not directly readable from the driver;
5270 * to test them we send debug packets
5271 */
5272static int bnx2x_int_mem_test(struct bnx2x *bp)
5273{
5274 int factor;
5275 int count, i;
5276 u32 val = 0;
5277
ad8d3948 5278 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5279 factor = 120;
5280 else if (CHIP_REV_IS_EMUL(bp))
5281 factor = 200;
5282 else
a2fbb9ea 5283 factor = 1;
5284
5285 DP(NETIF_MSG_HW, "start part1\n");
5286
5287 /* Disable inputs of parser neighbor blocks */
5288 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5289 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5290 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5291 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5292
5293 /* Write 0 to parser credits for CFC search request */
5294 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5295
5296 /* send Ethernet packet */
5297 bnx2x_lb_pckt(bp);
5298
5299 /* TODO: do I reset the NIG statistics? */
5300 /* Wait until NIG register shows 1 packet of size 0x10 */
5301 count = 1000 * factor;
5302 while (count) {
34f80b04 5303
5304 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5305 val = *bnx2x_sp(bp, wb_data[0]);
5306 if (val == 0x10)
5307 break;
5308
5309 msleep(10);
5310 count--;
5311 }
5312 if (val != 0x10) {
5313 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5314 return -1;
5315 }
5316
5317 /* Wait until PRS register shows 1 packet */
5318 count = 1000 * factor;
5319 while (count) {
5320 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5321 if (val == 1)
5322 break;
5323
5324 msleep(10);
5325 count--;
5326 }
5327 if (val != 0x1) {
5328 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5329 return -2;
5330 }
5331
5332 /* Reset and init BRB, PRS */
34f80b04 5333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5334 msleep(50);
34f80b04 5335 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5336 msleep(50);
5337 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5338 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5339
5340 DP(NETIF_MSG_HW, "part2\n");
5341
5342 /* Disable inputs of parser neighbor blocks */
5343 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5344 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5345 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5346 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5347
5348 /* Write 0 to parser credits for CFC search request */
5349 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5350
5351 /* send 10 Ethernet packets */
5352 for (i = 0; i < 10; i++)
5353 bnx2x_lb_pckt(bp);
5354
5355 /* Wait until NIG register shows 10 + 1
5356 packets of size 11*0x10 = 0xb0 */
5357 count = 1000 * factor;
5358 while (count) {
34f80b04 5359
5360 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5361 val = *bnx2x_sp(bp, wb_data[0]);
5362 if (val == 0xb0)
5363 break;
5364
5365 msleep(10);
5366 count--;
5367 }
5368 if (val != 0xb0) {
5369 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5370 return -3;
5371 }
5372
5373 /* Wait until PRS register shows 2 packets */
5374 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5375 if (val != 2)
5376 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5377
5378 /* Write 1 to parser credits for CFC search request */
5379 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5380
5381 /* Wait until PRS register shows 3 packets */
5382 msleep(10 * factor);
5383 /* Wait until NIG register shows 1 packet of size 0x10 */
5384 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5385 if (val != 3)
5386 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5387
5388 /* clear NIG EOP FIFO */
5389 for (i = 0; i < 11; i++)
5390 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5391 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5392 if (val != 1) {
5393 BNX2X_ERR("clear of NIG failed\n");
5394 return -4;
5395 }
5396
5397 /* Reset and init BRB, PRS, NIG */
5398 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5399 msleep(50);
5400 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5401 msleep(50);
5402 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5403 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5404#ifndef BCM_ISCSI
5405 /* set NIC mode */
5406 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5407#endif
5408
5409 /* Enable inputs of parser neighbor blocks */
5410 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5411 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5412 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5413 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5414
5415 DP(NETIF_MSG_HW, "done\n");
5416
5417 return 0; /* OK */
5418}
5419
5420static void enable_blocks_attention(struct bnx2x *bp)
5421{
5422 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5423 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5424 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5425 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5426 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5427 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5428 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5429 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5430 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5431/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5432/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5433 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5434 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5435 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5436/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5437/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5438 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5439 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5440 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5441 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5442/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5443/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5444 if (CHIP_REV_IS_FPGA(bp))
5445 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5446 else
5447 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5448 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5449 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5450 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5451/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5452/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5453 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5454 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5455/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5456 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5457}
5458
34f80b04 5459
5460static void bnx2x_reset_common(struct bnx2x *bp)
5461{
5462 /* reset_common */
5463 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5464 0xd3ffff7f);
5465 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5466}
5467
34f80b04 5468static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5469{
a2fbb9ea 5470 u32 val, i;
a2fbb9ea 5471
34f80b04 5472 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5473
81f75bbf 5474 bnx2x_reset_common(bp);
5475 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5476 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5477
5478 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5479 if (CHIP_IS_E1H(bp))
5480 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5481
5482 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5483 msleep(30);
5484 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5485
5486 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5487 if (CHIP_IS_E1(bp)) {
5488 /* enable HW interrupt from PXP on USDM overflow
5489 bit 16 on INT_MASK_0 */
5490 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5491 }
a2fbb9ea 5492
5493 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5494 bnx2x_init_pxp(bp);
5495
5496#ifdef __BIG_ENDIAN
5497 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5498 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5499 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5500 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5501 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5502 /* make sure this value is 0 */
5503 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5504
5505/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5506 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5507 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5508 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5509 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5510#endif
5511
34f80b04 5512 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5513#ifdef BCM_ISCSI
5514 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5515 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5516 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5517#endif
5518
5519 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5520 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5521
5522 /* let the HW do its magic ... */
5523 msleep(100);
5524 /* finish PXP init */
5525 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5526 if (val != 1) {
5527 BNX2X_ERR("PXP2 CFG failed\n");
5528 return -EBUSY;
5529 }
5530 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5531 if (val != 1) {
5532 BNX2X_ERR("PXP2 RD_INIT failed\n");
5533 return -EBUSY;
5534 }
a2fbb9ea 5535
5536 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5537 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5538
34f80b04 5539 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5540
5541 /* clean the DMAE memory */
5542 bp->dmae_ready = 1;
5543 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5544
5545 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5546 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5547 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5548 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5549
5550 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5551 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5552 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5553 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5554
5555 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5556 /* soft reset pulse */
5557 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5558 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5559
5560#ifdef BCM_ISCSI
34f80b04 5561 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5562#endif
a2fbb9ea 5563
5564 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5565 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5566 if (!CHIP_REV_IS_SLOW(bp)) {
5567 /* enable hw interrupt from doorbell Q */
5568 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5569 }
a2fbb9ea 5570
34f80b04 5571 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5572 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5573 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5574 /* set NIC mode */
5575 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5576 if (CHIP_IS_E1H(bp))
5577 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5578
5579 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5580 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5581 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5582 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5583
5584 if (CHIP_IS_E1H(bp)) {
5585 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5586 STORM_INTMEM_SIZE_E1H/2);
5587 bnx2x_init_fill(bp,
5588 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5589 0, STORM_INTMEM_SIZE_E1H/2);
5590 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5591 STORM_INTMEM_SIZE_E1H/2);
5592 bnx2x_init_fill(bp,
5593 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5594 0, STORM_INTMEM_SIZE_E1H/2);
5595 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5596 STORM_INTMEM_SIZE_E1H/2);
5597 bnx2x_init_fill(bp,
5598 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5599 0, STORM_INTMEM_SIZE_E1H/2);
5600 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5601 STORM_INTMEM_SIZE_E1H/2);
5602 bnx2x_init_fill(bp,
5603 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5604 0, STORM_INTMEM_SIZE_E1H/2);
5605 } else { /* E1 */
5606 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5607 STORM_INTMEM_SIZE_E1);
5608 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5609 STORM_INTMEM_SIZE_E1);
5610 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5611 STORM_INTMEM_SIZE_E1);
5612 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5613 STORM_INTMEM_SIZE_E1);
34f80b04 5614 }
a2fbb9ea 5615
5616 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5617 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5618 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5619 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5620
5621 /* sync semi rtc */
5622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5623 0x80000000);
5624 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5625 0x80000000);
a2fbb9ea 5626
5627 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5628 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5629 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5630
5631 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5632 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5633 REG_WR(bp, i, 0xc0cac01a);
5634 /* TODO: replace with something meaningful */
5635 }
8d9c5f34 5636 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5637 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5638
5639 if (sizeof(union cdu_context) != 1024)
5640 /* we currently assume that a context is 1024 bytes */
5641 printk(KERN_ALERT PFX "please adjust the size of"
5642 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5643
5644 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5645 val = (4 << 24) + (0 << 12) + 1024;
5646 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5647 if (CHIP_IS_E1(bp)) {
5648 /* !!! fix pxp client credit until excel update */
5649 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5650 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5651 }
a2fbb9ea 5652
5653 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5654 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5655 /* enable context validation interrupt from CFC */
5656 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5657
5658 /* set the thresholds to prevent CFC/CDU race */
5659 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5660
5661 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5662 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5663
5664 /* PXPCS COMMON comes here */
5665 /* Reset PCIE errors for debug */
5666 REG_WR(bp, 0x2814, 0xffffffff);
5667 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5668
5669 /* EMAC0 COMMON comes here */
5670 /* EMAC1 COMMON comes here */
5671 /* DBU COMMON comes here */
5672 /* DBG COMMON comes here */
5673
5674 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5675 if (CHIP_IS_E1H(bp)) {
5676 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5677 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5678 }
5679
5680 if (CHIP_REV_IS_SLOW(bp))
5681 msleep(200);
5682
5683 /* finish CFC init */
5684 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5685 if (val != 1) {
5686 BNX2X_ERR("CFC LL_INIT failed\n");
5687 return -EBUSY;
5688 }
5689 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5690 if (val != 1) {
5691 BNX2X_ERR("CFC AC_INIT failed\n");
5692 return -EBUSY;
5693 }
5694 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5695 if (val != 1) {
5696 BNX2X_ERR("CFC CAM_INIT failed\n");
5697 return -EBUSY;
5698 }
5699 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5700
5701 /* read NIG statistic
5702 to see if this is our first up since powerup */
5703 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5704 val = *bnx2x_sp(bp, wb_data[0]);
5705
5706 /* do internal memory self test */
5707 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5708 BNX2X_ERR("internal mem self test failed\n");
5709 return -EBUSY;
5710 }
5711
35b19ba5 5712 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5713 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5714 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5715 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5716 bp->port.need_hw_lock = 1;
5717 break;
5718
35b19ba5 5719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5720 /* Fan failure is indicated by SPIO 5 */
5721 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5722 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5723
5724 /* set to active low mode */
5725 val = REG_RD(bp, MISC_REG_SPIO_INT);
5726 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5727 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5728 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5729
5730 /* enable interrupt to signal the IGU */
5731 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5732 val |= (1 << MISC_REGISTERS_SPIO_5);
5733 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5734 break;
f1410647 5735
5736 default:
5737 break;
5738 }
f1410647 5739
5740 /* clear PXP2 attentions */
5741 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5742
34f80b04 5743 enable_blocks_attention(bp);
a2fbb9ea 5744
5745 if (!BP_NOMCP(bp)) {
5746 bnx2x_acquire_phy_lock(bp);
5747 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5748 bnx2x_release_phy_lock(bp);
5749 } else
5750 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5751
5752 return 0;
5753}
a2fbb9ea 5754
5755static int bnx2x_init_port(struct bnx2x *bp)
5756{
5757 int port = BP_PORT(bp);
1c06328c 5758 u32 low, high;
34f80b04 5759 u32 val;
a2fbb9ea 5760
5761 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5762
5763 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5764
5765 /* Port PXP comes here */
5766 /* Port PXP2 comes here */
5767#ifdef BCM_ISCSI
5768 /* Port0 1
5769 * Port1 385 */
5770 i++;
5771 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5772 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5773 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5774 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5775
5776 /* Port0 2
5777 * Port1 386 */
5778 i++;
5779 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5780 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5781 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5782 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5783
5784 /* Port0 3
5785 * Port1 387 */
5786 i++;
5787 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5788 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5789 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5790 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5791#endif
34f80b04 5792 /* Port CMs come here */
5793 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5794 (port ? XCM_PORT1_END : XCM_PORT0_END));
5795
5796 /* Port QM comes here */
5797#ifdef BCM_ISCSI
5798 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5799 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5800
5801 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5802 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5803#endif
5804 /* Port DQ comes here */
5805
5806 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5807 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5808 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5809 /* no pause for emulation and FPGA */
5810 low = 0;
5811 high = 513;
5812 } else {
5813 if (IS_E1HMF(bp))
5814 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5815 else if (bp->dev->mtu > 4096) {
5816 if (bp->flags & ONE_PORT_FLAG)
5817 low = 160;
5818 else {
5819 val = bp->dev->mtu;
5820 /* (24*1024 + val*4)/256 */
5821 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5822 }
5823 } else
5824 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5825 high = low + 56; /* 14*1024/256 */
5826 }
5827 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5828 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
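/* Worked example of the else-branch above: for an MTU of 9000 on a
 * two-port board in SF mode, low = 96 + 9000/64 + 1 = 237 and
 * high = 237 + 56 = 293, matching the (24*1024 + 4*MTU)/256 formula
 * in 256-byte units.
 */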
5829
5830
ad8d3948 5831 /* Port PRS comes here */
5832 /* Port TSDM comes here */
5833 /* Port CSDM comes here */
5834 /* Port USDM comes here */
5835 /* Port XSDM comes here */
5836 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5837 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5838 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5839 port ? USEM_PORT1_END : USEM_PORT0_END);
5840 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5841 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5842 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5843 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5844 /* Port UPB comes here */
5845 /* Port XPB comes here */
5846
5847 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5848 port ? PBF_PORT1_END : PBF_PORT0_END);
5849
5850 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5851 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5852
5853 /* update threshold */
34f80b04 5854 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5855 /* update init credit */
34f80b04 5856 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5857
5858 /* probe changes */
34f80b04 5859 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5860 msleep(5);
34f80b04 5861 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5862
5863#ifdef BCM_ISCSI
5864 /* tell the searcher where the T2 table is */
5865 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5866
5867 wb_write[0] = U64_LO(bp->t2_mapping);
5868 wb_write[1] = U64_HI(bp->t2_mapping);
5869 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5870 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5871 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5872 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5873
5874 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5875 /* Port SRCH comes here */
5876#endif
5877 /* Port CDU comes here */
5878 /* Port CFC comes here */
5879
5880 if (CHIP_IS_E1(bp)) {
5881 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5882 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5883 }
5884 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5885 port ? HC_PORT1_END : HC_PORT0_END);
5886
5887 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5888 MISC_AEU_PORT0_START,
5889 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5890 /* init aeu_mask_attn_func_0/1:
5891 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5892 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5893 * bits 4-7 are used for "per vn group attention" */
5894 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5895 (IS_E1HMF(bp) ? 0xF7 : 0x7));
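/* i.e. 0x7 = 0b0000_0111 leaves only bits 0-2 open, while 0xF7 =
 * 0b1111_0111 additionally opens bits 4-7 for the per-vn group
 * attentions; bit 3 stays masked in both modes, as noted above.
 */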
5896
5897 /* Port PXPCS comes here */
5898 /* Port EMAC0 comes here */
5899 /* Port EMAC1 comes here */
5900 /* Port DBU comes here */
5901 /* Port DBG comes here */
5902 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5903 port ? NIG_PORT1_END : NIG_PORT0_END);
5904
5905 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5906
5907 if (CHIP_IS_E1H(bp)) {
5908 /* 0x2 disable e1hov, 0x1 enable */
5909 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5910 (IS_E1HMF(bp) ? 0x1 : 0x2));
5911
5912 /* support pause requests from USDM, TSDM and BRB */
5913 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5914
5915 {
5916 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5917 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5918 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5919 }
5920 }
5921
5922 /* Port MCP comes here */
5923 /* Port DMAE comes here */
5924
35b19ba5 5925 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5926 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5927 {
5928 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5929
5930 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5931 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5932
5933 /* The GPIO should be swapped if the swap register is
5934 set and active */
5935 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5936 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5937
5938 /* Select function upon port-swap configuration */
5939 if (port == 0) {
5940 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5941 aeu_gpio_mask = (swap_val && swap_override) ?
5942 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5943 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5944 } else {
5945 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5946 aeu_gpio_mask = (swap_val && swap_override) ?
5947 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5948 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5949 }
5950 val = REG_RD(bp, offset);
5951 /* add GPIO3 to group */
5952 val |= aeu_gpio_mask;
5953 REG_WR(bp, offset, val);
5954 }
5955 break;
5956
35b19ba5 5957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5958 /* add SPIO 5 to group 0 */
5959 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5960 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5961 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5962 break;
5963
5964 default:
5965 break;
5966 }
5967
c18487ee 5968 bnx2x__link_reset(bp);
a2fbb9ea 5969
5970 return 0;
5971}
5972
5973#define ILT_PER_FUNC (768/2)
5974#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5975/* the phys address is shifted right 12 bits and has a
5976 1=valid bit added at the 53rd bit;
5977 then, since this is a wide register(TM),
5978 we split it into two 32-bit writes
5979 */
5980#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5981#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5982#define PXP_ONE_ILT(x) (((x) << 10) | x)
5983#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
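/* Worked example (hypothetical address): a 4K-aligned DMA address of
 * 0x123456000 gives ONCHIP_ADDR1 = 0x00123456 (the address shifted right
 * by 12) and ONCHIP_ADDR2 = 0x00100000 (just the valid bit, since bits
 * 44 and up are zero); the two halves are then written via bnx2x_wb_wr()
 * below.
 */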
5984
5985#define CNIC_ILT_LINES 0
5986
5987static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5988{
5989 int reg;
5990
5991 if (CHIP_IS_E1H(bp))
5992 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5993 else /* E1 */
5994 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5995
5996 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5997}
5998
5999static int bnx2x_init_func(struct bnx2x *bp)
6000{
6001 int port = BP_PORT(bp);
6002 int func = BP_FUNC(bp);
8badd27a 6003 u32 addr, val;
6004 int i;
6005
6006 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6007
6008 /* set MSI reconfigure capability */
6009 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6010 val = REG_RD(bp, addr);
6011 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6012 REG_WR(bp, addr, val);
6013
6014 i = FUNC_ILT_BASE(func);
6015
6016 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6017 if (CHIP_IS_E1H(bp)) {
6018 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6019 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6020 } else /* E1 */
6021 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6022 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6023
6024
6025 if (CHIP_IS_E1H(bp)) {
6026 for (i = 0; i < 9; i++)
6027 bnx2x_init_block(bp,
6028 cm_start[func][i], cm_end[func][i]);
6029
6030 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6031 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6032 }
6033
6034 /* HC init per function */
6035 if (CHIP_IS_E1H(bp)) {
6036 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6037
6038 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6039 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6040 }
6041 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6042
c14423fe 6043 /* Reset PCIE errors for debug */
6044 REG_WR(bp, 0x2114, 0xffffffff);
6045 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6046
6047 return 0;
6048}
6049
6050static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6051{
6052 int i, rc = 0;
a2fbb9ea 6053
6054 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6055 BP_FUNC(bp), load_code);
a2fbb9ea 6056
6057 bp->dmae_ready = 0;
6058 mutex_init(&bp->dmae_mutex);
6059 bnx2x_gunzip_init(bp);
a2fbb9ea 6060
6061 switch (load_code) {
6062 case FW_MSG_CODE_DRV_LOAD_COMMON:
6063 rc = bnx2x_init_common(bp);
6064 if (rc)
6065 goto init_hw_err;
6066 /* no break */
6067
6068 case FW_MSG_CODE_DRV_LOAD_PORT:
6069 bp->dmae_ready = 1;
6070 rc = bnx2x_init_port(bp);
6071 if (rc)
6072 goto init_hw_err;
6073 /* no break */
6074
6075 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6076 bp->dmae_ready = 1;
6077 rc = bnx2x_init_func(bp);
6078 if (rc)
6079 goto init_hw_err;
6080 break;
6081
6082 default:
6083 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6084 break;
6085 }
6086
6087 if (!BP_NOMCP(bp)) {
6088 int func = BP_FUNC(bp);
6089
6090 bp->fw_drv_pulse_wr_seq =
34f80b04 6091 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6092 DRV_PULSE_SEQ_MASK);
6093 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6094 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6095 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6096 } else
6097 bp->func_stx = 0;
a2fbb9ea 6098
6099 /* this needs to be done before gunzip end */
6100 bnx2x_zero_def_sb(bp);
6101 for_each_queue(bp, i)
6102 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6103
6104init_hw_err:
6105 bnx2x_gunzip_end(bp);
6106
6107 return rc;
6108}
6109
c14423fe 6110/* send the MCP a request, block until there is a reply */
6111static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6112{
34f80b04 6113 int func = BP_FUNC(bp);
6114 u32 seq = ++bp->fw_seq;
6115 u32 rc = 0;
6116 u32 cnt = 1;
6117 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6118
34f80b04 6119 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6120 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6121
6122 do {
6123 /* let the FW do its magic ... */
6124 msleep(delay);
a2fbb9ea 6125
19680c48 6126 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6127
6128 /* Give the FW up to 2 seconds (200*10ms) */
6129 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6130
6131 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6132 cnt*delay, rc, seq);
6133
6134 /* is this a reply to our command? */
6135 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6136 rc &= FW_MSG_CODE_MASK;
f1410647 6137
6138 } else {
6139 /* FW BUG! */
6140 BNX2X_ERR("FW failed to respond!\n");
6141 bnx2x_fw_dump(bp);
6142 rc = 0;
6143 }
f1410647 6144
6145 return rc;
6146}
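/* Usage sketch (hedged - the real call sites appear elsewhere in this
 * file): the load path issues something like
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 * and branches on the FW_MSG_CODE_DRV_LOAD_* reply, treating rc == 0
 * (no matching sequence number) as a fatal MCP failure.
 */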
6147
6148static void bnx2x_free_mem(struct bnx2x *bp)
6149{
6150
6151#define BNX2X_PCI_FREE(x, y, size) \
6152 do { \
6153 if (x) { \
6154 pci_free_consistent(bp->pdev, size, x, y); \
6155 x = NULL; \
6156 y = 0; \
6157 } \
6158 } while (0)
6159
6160#define BNX2X_FREE(x) \
6161 do { \
6162 if (x) { \
6163 vfree(x); \
6164 x = NULL; \
6165 } \
6166 } while (0)
6167
6168 int i;
6169
6170 /* fastpath */
555f6c78 6171 /* Common */
6172 for_each_queue(bp, i) {
6173
555f6c78 6174 /* status blocks */
6175 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6176 bnx2x_fp(bp, i, status_blk_mapping),
6177 sizeof(struct host_status_block) +
6178 sizeof(struct eth_tx_db_data));
6179 }
6180 /* Rx */
6181 for_each_rx_queue(bp, i) {
a2fbb9ea 6182
555f6c78 6183 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6184 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6185 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6186 bnx2x_fp(bp, i, rx_desc_mapping),
6187 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6188
6189 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6190 bnx2x_fp(bp, i, rx_comp_mapping),
6191 sizeof(struct eth_fast_path_rx_cqe) *
6192 NUM_RCQ_BD);
a2fbb9ea 6193
7a9b2557 6194 /* SGE ring */
32626230 6195 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6196 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6197 bnx2x_fp(bp, i, rx_sge_mapping),
6198 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6199 }
6200 /* Tx */
6201 for_each_tx_queue(bp, i) {
6202
6203 /* fastpath tx rings: tx_buf tx_desc */
6204 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6205 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6206 bnx2x_fp(bp, i, tx_desc_mapping),
6207 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6208 }
6209 /* end of fastpath */
6210
6211 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6212 sizeof(struct host_def_status_block));
6213
6214 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6215 sizeof(struct bnx2x_slowpath));
6216
6217#ifdef BCM_ISCSI
6218 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6219 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6220 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6221 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6222#endif
7a9b2557 6223 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6224
6225#undef BNX2X_PCI_FREE
6226#undef BNX2X_FREE
6227}
6228
6229static int bnx2x_alloc_mem(struct bnx2x *bp)
6230{
6231
6232#define BNX2X_PCI_ALLOC(x, y, size) \
6233 do { \
6234 x = pci_alloc_consistent(bp->pdev, size, y); \
6235 if (x == NULL) \
6236 goto alloc_mem_err; \
6237 memset(x, 0, size); \
6238 } while (0)
6239
6240#define BNX2X_ALLOC(x, size) \
6241 do { \
6242 x = vmalloc(size); \
6243 if (x == NULL) \
6244 goto alloc_mem_err; \
6245 memset(x, 0, size); \
6246 } while (0)
6247
6248 int i;
6249
6250 /* fastpath */
555f6c78 6251 /* Common */
6252 for_each_queue(bp, i) {
6253 bnx2x_fp(bp, i, bp) = bp;
6254
555f6c78 6255 /* status blocks */
6256 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6257 &bnx2x_fp(bp, i, status_blk_mapping),
6258 sizeof(struct host_status_block) +
6259 sizeof(struct eth_tx_db_data));
6260 }
6261 /* Rx */
6262 for_each_rx_queue(bp, i) {
a2fbb9ea 6263
555f6c78 6264 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6265 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6266 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6267 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6268 &bnx2x_fp(bp, i, rx_desc_mapping),
6269 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6270
6271 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6272 &bnx2x_fp(bp, i, rx_comp_mapping),
6273 sizeof(struct eth_fast_path_rx_cqe) *
6274 NUM_RCQ_BD);
6275
6276 /* SGE ring */
6277 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6278 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6279 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6280 &bnx2x_fp(bp, i, rx_sge_mapping),
6281 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6282 }
6283 /* Tx */
6284 for_each_tx_queue(bp, i) {
6285
6286 bnx2x_fp(bp, i, hw_tx_prods) =
6287 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6288
6289 bnx2x_fp(bp, i, tx_prods_mapping) =
6290 bnx2x_fp(bp, i, status_blk_mapping) +
6291 sizeof(struct host_status_block);
6292
6293 /* fastpath tx rings: tx_buf tx_desc */
6294 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6295 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6296 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6297 &bnx2x_fp(bp, i, tx_desc_mapping),
6298 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6299 }
6300 /* end of fastpath */
6301
6302 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6303 sizeof(struct host_def_status_block));
6304
6305 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6306 sizeof(struct bnx2x_slowpath));
6307
6308#ifdef BCM_ISCSI
6309 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6310
6311 /* Initialize T1 */
6312 for (i = 0; i < 64*1024; i += 64) {
6313 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6314 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6315 }
6316
6317 /* allocate searcher T2 table
6318 we allocate 1/4 of alloc num for T2
6319 (which is not entered into the ILT) */
6320 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6321
6322 /* Initialize T2 */
6323 for (i = 0; i < 16*1024; i += 64)
6324 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6325
c14423fe 6326 /* now fixup the last line in the block to point to the next block */
6327 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6328
6329 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6330 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6331
6332 /* QM queues (128*MAX_CONN) */
6333 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6334#endif
6335
6336 /* Slow path ring */
6337 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6338
6339 return 0;
6340
6341alloc_mem_err:
6342 bnx2x_free_mem(bp);
6343 return -ENOMEM;
6344
6345#undef BNX2X_PCI_ALLOC
6346#undef BNX2X_ALLOC
6347}
6348
6349static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6350{
6351 int i;
6352
555f6c78 6353 for_each_tx_queue(bp, i) {
6354 struct bnx2x_fastpath *fp = &bp->fp[i];
6355
6356 u16 bd_cons = fp->tx_bd_cons;
6357 u16 sw_prod = fp->tx_pkt_prod;
6358 u16 sw_cons = fp->tx_pkt_cons;
6359
6360 while (sw_cons != sw_prod) {
6361 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6362 sw_cons++;
6363 }
6364 }
6365}
6366
6367static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6368{
6369 int i, j;
6370
555f6c78 6371 for_each_rx_queue(bp, j) {
6372 struct bnx2x_fastpath *fp = &bp->fp[j];
6373
6374 for (i = 0; i < NUM_RX_BD; i++) {
6375 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6376 struct sk_buff *skb = rx_buf->skb;
6377
6378 if (skb == NULL)
6379 continue;
6380
6381 pci_unmap_single(bp->pdev,
6382 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6383 bp->rx_buf_size,
6384 PCI_DMA_FROMDEVICE);
6385
6386 rx_buf->skb = NULL;
6387 dev_kfree_skb(skb);
6388 }
7a9b2557 6389 if (!fp->disable_tpa)
6390 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6391 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6392 ETH_MAX_AGGREGATION_QUEUES_E1H);
6393 }
6394}
6395
6396static void bnx2x_free_skbs(struct bnx2x *bp)
6397{
6398 bnx2x_free_tx_skbs(bp);
6399 bnx2x_free_rx_skbs(bp);
6400}
6401
6402static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6403{
34f80b04 6404 int i, offset = 1;
6405
6406 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6407 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6408 bp->msix_table[0].vector);
6409
6410 for_each_queue(bp, i) {
c14423fe 6411 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6412 "state %x\n", i, bp->msix_table[i + offset].vector,
6413 bnx2x_fp(bp, i, state));
6414
34f80b04 6415 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6416 }
6417}
6418
6419static void bnx2x_free_irq(struct bnx2x *bp)
6420{
a2fbb9ea 6421 if (bp->flags & USING_MSIX_FLAG) {
6422 bnx2x_free_msix_irqs(bp);
6423 pci_disable_msix(bp->pdev);
6424 bp->flags &= ~USING_MSIX_FLAG;
6425
6426 } else if (bp->flags & USING_MSI_FLAG) {
6427 free_irq(bp->pdev->irq, bp->dev);
6428 pci_disable_msi(bp->pdev);
6429 bp->flags &= ~USING_MSI_FLAG;
6430
6431 } else
6432 free_irq(bp->pdev->irq, bp->dev);
6433}
6434
6435static int bnx2x_enable_msix(struct bnx2x *bp)
6436{
6437 int i, rc, offset = 1;
6438 int igu_vec = 0;
a2fbb9ea 6439
6440 bp->msix_table[0].entry = igu_vec;
6441 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6442
34f80b04 6443 for_each_queue(bp, i) {
8badd27a 6444 igu_vec = BP_L_ID(bp) + offset + i;
6445 bp->msix_table[i + offset].entry = igu_vec;
6446 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6447 "(fastpath #%u)\n", i + offset, igu_vec, i);
6448 }
6449
34f80b04 6450 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6451 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6452 if (rc) {
6453 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6454 return rc;
34f80b04 6455 }
8badd27a 6456
6457 bp->flags |= USING_MSIX_FLAG;
6458
6459 return 0;
6460}
6461
6462static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6463{
34f80b04 6464 int i, rc, offset = 1;
a2fbb9ea 6465
6466 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6467 bp->dev->name, bp->dev);
6468 if (rc) {
6469 BNX2X_ERR("request sp irq failed\n");
6470 return -EBUSY;
6471 }
6472
6473 for_each_queue(bp, i) {
6474 struct bnx2x_fastpath *fp = &bp->fp[i];
6475
6476 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6477 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6478 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6479 if (rc) {
555f6c78 6480 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6481 bnx2x_free_msix_irqs(bp);
6482 return -EBUSY;
6483 }
6484
555f6c78 6485 fp->state = BNX2X_FP_STATE_IRQ;
6486 }
6487
6488 i = BNX2X_NUM_QUEUES(bp);
6489 if (is_multi(bp))
6490 printk(KERN_INFO PFX
6491 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6492 bp->dev->name, bp->msix_table[0].vector,
6493 bp->msix_table[offset].vector,
6494 bp->msix_table[offset + i - 1].vector);
6495 else
6496 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6497 bp->dev->name, bp->msix_table[0].vector,
6498 bp->msix_table[offset + i - 1].vector);
6499
a2fbb9ea 6500 return 0;
6501}
6502
6503static int bnx2x_enable_msi(struct bnx2x *bp)
6504{
6505 int rc;
6506
6507 rc = pci_enable_msi(bp->pdev);
6508 if (rc) {
6509 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6510 return -1;
6511 }
6512 bp->flags |= USING_MSI_FLAG;
6513
6514 return 0;
6515}
6516
6517static int bnx2x_req_irq(struct bnx2x *bp)
6518{
8badd27a 6519 unsigned long flags;
34f80b04 6520 int rc;
a2fbb9ea 6521
6522 if (bp->flags & USING_MSI_FLAG)
6523 flags = 0;
6524 else
6525 flags = IRQF_SHARED;
6526
6527 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6528 bp->dev->name, bp->dev);
6529 if (!rc)
6530 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6531
6532 return rc;
6533}
6534
6535static void bnx2x_napi_enable(struct bnx2x *bp)
6536{
6537 int i;
6538
555f6c78 6539 for_each_rx_queue(bp, i)
6540 napi_enable(&bnx2x_fp(bp, i, napi));
6541}
6542
6543static void bnx2x_napi_disable(struct bnx2x *bp)
6544{
6545 int i;
6546
555f6c78 6547 for_each_rx_queue(bp, i)
6548 napi_disable(&bnx2x_fp(bp, i, napi));
6549}
6550
6551static void bnx2x_netif_start(struct bnx2x *bp)
6552{
6553 if (atomic_dec_and_test(&bp->intr_sem)) {
6554 if (netif_running(bp->dev)) {
6555 bnx2x_napi_enable(bp);
6556 bnx2x_int_enable(bp);
6557 if (bp->state == BNX2X_STATE_OPEN)
6558 netif_tx_wake_all_queues(bp->dev);
6559 }
6560 }
6561}
6562
f8ef6e44 6563static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6564{
f8ef6e44 6565 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6566 bnx2x_napi_disable(bp);
65abd74d 6567 if (netif_running(bp->dev)) {
6568 netif_tx_disable(bp->dev);
6569 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6570 }
6571}
6572
6573/*
6574 * Init service functions
6575 */
6576
3101c2bc 6577static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6578{
6579 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6580 int port = BP_PORT(bp);
6581
6582 /* CAM allocation
6583 * unicasts 0-31:port0 32-63:port1
6584 * multicast 64-127:port0 128-191:port1
6585 */
8d9c5f34 6586 config->hdr.length = 2;
af246401 6587 config->hdr.offset = port ? 32 : 0;
34f80b04 6588 config->hdr.client_id = BP_CL_ID(bp);
6589 config->hdr.reserved1 = 0;
6590
6591 /* primary MAC */
6592 config->config_table[0].cam_entry.msb_mac_addr =
6593 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6594 config->config_table[0].cam_entry.middle_mac_addr =
6595 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6596 config->config_table[0].cam_entry.lsb_mac_addr =
6597 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6598 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6599 if (set)
6600 config->config_table[0].target_table_entry.flags = 0;
6601 else
6602 CAM_INVALIDATE(config->config_table[0]);
6603 config->config_table[0].target_table_entry.client_id = 0;
6604 config->config_table[0].target_table_entry.vlan_id = 0;
6605
6606 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6607 (set ? "setting" : "clearing"),
6608 config->config_table[0].cam_entry.msb_mac_addr,
6609 config->config_table[0].cam_entry.middle_mac_addr,
6610 config->config_table[0].cam_entry.lsb_mac_addr);
6611
6612 /* broadcast */
6613 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6614 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6615 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6616 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6617 if (set)
6618 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6619 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6620 else
6621 CAM_INVALIDATE(config->config_table[1]);
6622 config->config_table[1].target_table_entry.client_id = 0;
6623 config->config_table[1].target_table_entry.vlan_id = 0;
6624
6625 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6626 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6627 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6628}
6629
3101c2bc 6630static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6631{
6632 struct mac_configuration_cmd_e1h *config =
6633 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6634
3101c2bc 6635 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6636 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6637 return;
6638 }
6639
6640 /* CAM allocation for E1H
6641 * unicasts: by func number
6642 * multicast: 20+FUNC*20, 20 each
6643 */
8d9c5f34 6644 config->hdr.length = 1;
6645 config->hdr.offset = BP_FUNC(bp);
6646 config->hdr.client_id = BP_CL_ID(bp);
6647 config->hdr.reserved1 = 0;
6648
6649 /* primary MAC */
6650 config->config_table[0].msb_mac_addr =
6651 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6652 config->config_table[0].middle_mac_addr =
6653 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6654 config->config_table[0].lsb_mac_addr =
6655 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6656 config->config_table[0].client_id = BP_L_ID(bp);
6657 config->config_table[0].vlan_id = 0;
6658 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6659 if (set)
6660 config->config_table[0].flags = BP_PORT(bp);
6661 else
6662 config->config_table[0].flags =
6663 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6664
6665 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6666 (set ? "setting" : "clearing"),
6667 config->config_table[0].msb_mac_addr,
6668 config->config_table[0].middle_mac_addr,
6669 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6670
6671 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6672 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6673 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6674}
6675
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;
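	/* 5000 iterations with msleep(1) in the loop below bounds the
	 * wait at roughly five seconds
	 */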

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
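	/* with no MCP the counters above stand in for its arbitration:
	 * the first function loaded anywhere performs the chip-wide
	 * (COMMON) init, the first one on each port performs the PORT
	 * init, and every later function initializes only itself
	 */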

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_mb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_mb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;
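		/* each MAC_MATCH entry is a pair of 32-bit registers
		 * (8 bytes), hence the scaling by 8; offset 0 is the
		 * PMF's entry and is left untouched
		 */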

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
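	/* e.g. a chip num of 0x164f with rev, metal and bond_id all
	 * zero would pack to chip_id 0x164f0000 under the layout above
	 */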

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}
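	/* each SHMEM dword read above packs two lanes' 16-bit values,
	 * so two rx and two tx reads cover all four lanes
	 */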

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
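	/* mac_upper holds bytes 0-1 and mac_lower bytes 2-5 of the MAC,
	 * unpacked here from most to least significant byte
	 */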
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);
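	/* i.e. the periodic timer fires every "poll" jiffies when that
	 * module parameter is set, otherwise once a second on real
	 * silicon and every five seconds on slow (non-silicon) revisions
	 */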

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
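		/* the MF max-bandwidth field is in units of 100 Mbps,
		 * hence the scaling to ethtool's Mbps before it caps
		 * the reported speed below
		 */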
8229 if (vn_max_rate < cmd->speed)
8230 cmd->speed = vn_max_rate;
8231 }
a2fbb9ea 8232
c18487ee
YR
8233 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8234 u32 ext_phy_type =
8235 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8236
8237 switch (ext_phy_type) {
8238 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8239 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8240 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8241 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8242 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8244 cmd->port = PORT_FIBRE;
8245 break;
8246
8247 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8248 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8249 cmd->port = PORT_TP;
8250 break;
8251
c18487ee
YR
8252 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8253 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8254 bp->link_params.ext_phy_config);
8255 break;
8256
f1410647
ET
8257 default:
8258 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8259 bp->link_params.ext_phy_config);
8260 break;
f1410647
ET
8261 }
8262 } else
a2fbb9ea 8263 cmd->port = PORT_TP;
a2fbb9ea 8264
34f80b04 8265 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8266 cmd->transceiver = XCVR_INTERNAL;
8267
c18487ee 8268 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8269 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8270 else
a2fbb9ea 8271 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8272
8273 cmd->maxtxpkt = 0;
8274 cmd->maxrxpkt = 0;
8275
8276 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8277 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8278 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8279 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8280 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8281 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8282 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8283
8284 return 0;
8285}
8286
8287static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8288{
8289 struct bnx2x *bp = netdev_priv(dev);
8290 u32 advertising;
8291
34f80b04
EG
8292 if (IS_E1HMF(bp))
8293 return 0;
8294
a2fbb9ea
ET
8295 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8296 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8297 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8298 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8299 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8300 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8301 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8302
a2fbb9ea 8303 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8304 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8305 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8306 return -EINVAL;
f1410647 8307 }
a2fbb9ea
ET
8308
8309 /* advertise the requested speed and duplex if supported */
34f80b04 8310 cmd->advertising &= bp->port.supported;
a2fbb9ea 8311
c18487ee
YR
8312 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8313 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8314 bp->port.advertising |= (ADVERTISED_Autoneg |
8315 cmd->advertising);
a2fbb9ea
ET
8316
8317 } else { /* forced speed */
8318 /* advertise the requested speed and duplex if supported */
8319 switch (cmd->speed) {
8320 case SPEED_10:
8321 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8322 if (!(bp->port.supported &
f1410647
ET
8323 SUPPORTED_10baseT_Full)) {
8324 DP(NETIF_MSG_LINK,
8325 "10M full not supported\n");
a2fbb9ea 8326 return -EINVAL;
f1410647 8327 }
a2fbb9ea
ET
8328
8329 advertising = (ADVERTISED_10baseT_Full |
8330 ADVERTISED_TP);
8331 } else {
34f80b04 8332 if (!(bp->port.supported &
f1410647
ET
8333 SUPPORTED_10baseT_Half)) {
8334 DP(NETIF_MSG_LINK,
8335 "10M half not supported\n");
a2fbb9ea 8336 return -EINVAL;
f1410647 8337 }
a2fbb9ea
ET
8338
8339 advertising = (ADVERTISED_10baseT_Half |
8340 ADVERTISED_TP);
8341 }
8342 break;
8343
8344 case SPEED_100:
8345 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8346 if (!(bp->port.supported &
f1410647
ET
8347 SUPPORTED_100baseT_Full)) {
8348 DP(NETIF_MSG_LINK,
8349 "100M full not supported\n");
a2fbb9ea 8350 return -EINVAL;
f1410647 8351 }
a2fbb9ea
ET
8352
8353 advertising = (ADVERTISED_100baseT_Full |
8354 ADVERTISED_TP);
8355 } else {
34f80b04 8356 if (!(bp->port.supported &
f1410647
ET
8357 SUPPORTED_100baseT_Half)) {
8358 DP(NETIF_MSG_LINK,
8359 "100M half not supported\n");
a2fbb9ea 8360 return -EINVAL;
f1410647 8361 }
a2fbb9ea
ET
8362
8363 advertising = (ADVERTISED_100baseT_Half |
8364 ADVERTISED_TP);
8365 }
8366 break;
8367
8368 case SPEED_1000:
f1410647
ET
8369 if (cmd->duplex != DUPLEX_FULL) {
8370 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8371 return -EINVAL;
f1410647 8372 }
a2fbb9ea 8373
34f80b04 8374 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8375 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8376 return -EINVAL;
f1410647 8377 }
a2fbb9ea
ET
8378
8379 advertising = (ADVERTISED_1000baseT_Full |
8380 ADVERTISED_TP);
8381 break;
8382
8383 case SPEED_2500:
f1410647
ET
8384 if (cmd->duplex != DUPLEX_FULL) {
8385 DP(NETIF_MSG_LINK,
8386 "2.5G half not supported\n");
a2fbb9ea 8387 return -EINVAL;
f1410647 8388 }
a2fbb9ea 8389
34f80b04 8390 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8391 DP(NETIF_MSG_LINK,
8392 "2.5G full not supported\n");
a2fbb9ea 8393 return -EINVAL;
f1410647 8394 }
a2fbb9ea 8395
f1410647 8396 advertising = (ADVERTISED_2500baseX_Full |
8397 ADVERTISED_TP);
8398 break;
8399
8400 case SPEED_10000:
8401 if (cmd->duplex != DUPLEX_FULL) {
8402 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8403 return -EINVAL;
f1410647 8404 }
a2fbb9ea 8405
34f80b04 8406 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8407 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8408 return -EINVAL;
f1410647 8409 }
8410
8411 advertising = (ADVERTISED_10000baseT_Full |
8412 ADVERTISED_FIBRE);
8413 break;
8414
8415 default:
f1410647 8416 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8417 return -EINVAL;
8418 }
8419
8420 bp->link_params.req_line_speed = cmd->speed;
8421 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8422 bp->port.advertising = advertising;
8423 }
8424
c18487ee 8425 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8426 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8427 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8428 bp->port.advertising);
a2fbb9ea 8429
34f80b04 8430 if (netif_running(dev)) {
bb2a0f7a 8431 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8432 bnx2x_link_set(bp);
8433 }
8434
8435 return 0;
8436}
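/* Editor's note (illustrative, not part of the original source): a forced
 * speed request such as
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * takes the "forced speed" branch above - SPEED_100/DUPLEX_FULL is checked
 * against bp->port.supported and, if valid, stored in
 * bp->link_params.req_line_speed/req_duplex before bnx2x_link_set()
 * reprograms the link.
 */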
8437
8438#define PHY_FW_VER_LEN 10
8439
8440static void bnx2x_get_drvinfo(struct net_device *dev,
8441 struct ethtool_drvinfo *info)
8442{
8443 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8444 u8 phy_fw_ver[PHY_FW_VER_LEN];
8445
8446 strcpy(info->driver, DRV_MODULE_NAME);
8447 strcpy(info->version, DRV_MODULE_VERSION);
8448
8449 phy_fw_ver[0] = '\0';
34f80b04 8450 if (bp->port.pmf) {
4a37fb66 8451 bnx2x_acquire_phy_lock(bp);
8452 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8453 (bp->state != BNX2X_STATE_CLOSED),
8454 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8455 bnx2x_release_phy_lock(bp);
34f80b04 8456 }
c18487ee 8457
8458 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8459 (bp->common.bc_ver & 0xff0000) >> 16,
8460 (bp->common.bc_ver & 0xff00) >> 8,
8461 (bp->common.bc_ver & 0xff),
8462 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8463 strcpy(info->bus_info, pci_name(bp->pdev));
8464 info->n_stats = BNX2X_NUM_STATS;
8465 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8466 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8467 info->regdump_len = 0;
8468}
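/* Editor's note (worked example): with bp->common.bc_ver == 0x040200 the
 * snprintf() above produces "BC:4.2.0" - one decimal field per version
 * byte - and, on a PMF with external PHY firmware, " PHY:<ver>" is
 * appended.
 */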
8469
8470static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8471{
8472 struct bnx2x *bp = netdev_priv(dev);
8473
8474 if (bp->flags & NO_WOL_FLAG) {
8475 wol->supported = 0;
8476 wol->wolopts = 0;
8477 } else {
8478 wol->supported = WAKE_MAGIC;
8479 if (bp->wol)
8480 wol->wolopts = WAKE_MAGIC;
8481 else
8482 wol->wolopts = 0;
8483 }
8484 memset(&wol->sopass, 0, sizeof(wol->sopass));
8485}
8486
8487static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8488{
8489 struct bnx2x *bp = netdev_priv(dev);
8490
8491 if (wol->wolopts & ~WAKE_MAGIC)
8492 return -EINVAL;
8493
8494 if (wol->wolopts & WAKE_MAGIC) {
8495 if (bp->flags & NO_WOL_FLAG)
8496 return -EINVAL;
8497
8498 bp->wol = 1;
34f80b04 8499 } else
a2fbb9ea 8500 bp->wol = 0;
34f80b04 8501
8502 return 0;
8503}
8504
8505static u32 bnx2x_get_msglevel(struct net_device *dev)
8506{
8507 struct bnx2x *bp = netdev_priv(dev);
8508
8509 return bp->msglevel;
8510}
8511
8512static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8513{
8514 struct bnx2x *bp = netdev_priv(dev);
8515
8516 if (capable(CAP_NET_ADMIN))
8517 bp->msglevel = level;
8518}
8519
8520static int bnx2x_nway_reset(struct net_device *dev)
8521{
8522 struct bnx2x *bp = netdev_priv(dev);
8523
8524 if (!bp->port.pmf)
8525 return 0;
a2fbb9ea 8526
34f80b04 8527 if (netif_running(dev)) {
bb2a0f7a 8528 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8529 bnx2x_link_set(bp);
8530 }
8531
8532 return 0;
8533}
8534
8535static int bnx2x_get_eeprom_len(struct net_device *dev)
8536{
8537 struct bnx2x *bp = netdev_priv(dev);
8538
34f80b04 8539 return bp->common.flash_size;
8540}
8541
8542static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8543{
34f80b04 8544 int port = BP_PORT(bp);
8545 int count, i;
8546 u32 val = 0;
8547
8548 /* adjust timeout for emulation/FPGA */
8549 count = NVRAM_TIMEOUT_COUNT;
8550 if (CHIP_REV_IS_SLOW(bp))
8551 count *= 100;
8552
8553 /* request access to nvram interface */
8554 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8555 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8556
8557 for (i = 0; i < count*10; i++) {
8558 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8559 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8560 break;
8561
8562 udelay(5);
8563 }
8564
8565 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8566 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8567 return -EBUSY;
8568 }
8569
8570 return 0;
8571}
8572
8573static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8574{
34f80b04 8575 int port = BP_PORT(bp);
8576 int count, i;
8577 u32 val = 0;
8578
8579 /* adjust timeout for emulation/FPGA */
8580 count = NVRAM_TIMEOUT_COUNT;
8581 if (CHIP_REV_IS_SLOW(bp))
8582 count *= 100;
8583
8584 /* relinquish nvram interface */
8585 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8586 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8587
8588 for (i = 0; i < count*10; i++) {
8589 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8590 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8591 break;
8592
8593 udelay(5);
8594 }
8595
8596 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8597 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8598 return -EBUSY;
8599 }
8600
8601 return 0;
8602}
8603
8604static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8605{
8606 u32 val;
8607
8608 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8609
8610 /* enable both bits, even on read */
8611 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8612 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8613 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8614}
8615
8616static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8617{
8618 u32 val;
8619
8620 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8621
8622 /* disable both bits, even after read */
8623 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8624 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8625 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8626}
8627
8628static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8629 u32 cmd_flags)
8630{
f1410647 8631 int count, i, rc;
8632 u32 val;
8633
8634 /* build the command word */
8635 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8636
8637 /* need to clear DONE bit separately */
8638 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8639
8640 /* address of the NVRAM to read from */
8641 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8642 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8643
8644 /* issue a read command */
8645 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8646
8647 /* adjust timeout for emulation/FPGA */
8648 count = NVRAM_TIMEOUT_COUNT;
8649 if (CHIP_REV_IS_SLOW(bp))
8650 count *= 100;
8651
8652 /* wait for completion */
8653 *ret_val = 0;
8654 rc = -EBUSY;
8655 for (i = 0; i < count; i++) {
8656 udelay(5);
8657 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8658
8659 if (val & MCPR_NVM_COMMAND_DONE) {
8660 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 8661			/* we read nvram data in cpu order
 8662			 * but ethtool sees it as an array of bytes
 8663			 * converting to big-endian does the job */
8664 val = cpu_to_be32(val);
8665 *ret_val = val;
8666 rc = 0;
8667 break;
8668 }
8669 }
8670
8671 return rc;
8672}
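/* Editor's note: every NVRAM transaction is a DOIT command on a
 * dword-aligned address; a multi-dword burst is bracketed by
 * MCPR_NVM_COMMAND_FIRST on its first dword and MCPR_NVM_COMMAND_LAST on
 * its final one, which is exactly how bnx2x_nvram_read() below drives this
 * helper.
 */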
8673
8674static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8675 int buf_size)
8676{
8677 int rc;
8678 u32 cmd_flags;
8679 u32 val;
8680
8681 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8682 DP(BNX2X_MSG_NVM,
c14423fe 8683 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8684 offset, buf_size);
8685 return -EINVAL;
8686 }
8687
8688 if (offset + buf_size > bp->common.flash_size) {
8689 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8690 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8691 offset, buf_size, bp->common.flash_size);
8692 return -EINVAL;
8693 }
8694
8695 /* request access to nvram interface */
8696 rc = bnx2x_acquire_nvram_lock(bp);
8697 if (rc)
8698 return rc;
8699
8700 /* enable access to nvram interface */
8701 bnx2x_enable_nvram_access(bp);
8702
8703 /* read the first word(s) */
8704 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8705 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8706 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8707 memcpy(ret_buf, &val, 4);
8708
8709 /* advance to the next dword */
8710 offset += sizeof(u32);
8711 ret_buf += sizeof(u32);
8712 buf_size -= sizeof(u32);
8713 cmd_flags = 0;
8714 }
8715
8716 if (rc == 0) {
8717 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8718 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8719 memcpy(ret_buf, &val, 4);
8720 }
8721
8722 /* disable access to nvram interface */
8723 bnx2x_disable_nvram_access(bp);
8724 bnx2x_release_nvram_lock(bp);
8725
8726 return rc;
8727}
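/* Editor's note (illustrative sketch, not part of the driver): a
 * hypothetical caller reading the 4-byte NVRAM bootstrap magic with the
 * helper above; the buffer is filled with big-endian bytes:
 *
 *	u32 magic_be;
 *
 *	if (bnx2x_nvram_read(bp, 0, (u8 *)&magic_be, sizeof(magic_be)) == 0)
 *		printk(KERN_DEBUG "NVRAM magic 0x%08x\n",
 *		       be32_to_cpu(magic_be));
 *
 * bnx2x_test_nvram() further down performs this exact read.
 */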
8728
8729static int bnx2x_get_eeprom(struct net_device *dev,
8730 struct ethtool_eeprom *eeprom, u8 *eebuf)
8731{
8732 struct bnx2x *bp = netdev_priv(dev);
8733 int rc;
8734
8735 if (!netif_running(dev))
8736 return -EAGAIN;
8737
34f80b04 8738 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8739 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8740 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8741 eeprom->len, eeprom->len);
8742
8743 /* parameters already validated in ethtool_get_eeprom */
8744
8745 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8746
8747 return rc;
8748}
8749
8750static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8751 u32 cmd_flags)
8752{
f1410647 8753 int count, i, rc;
8754
8755 /* build the command word */
8756 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8757
8758 /* need to clear DONE bit separately */
8759 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8760
8761 /* write the data */
8762 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8763
8764 /* address of the NVRAM to write to */
8765 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8766 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8767
8768 /* issue the write command */
8769 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8770
8771 /* adjust timeout for emulation/FPGA */
8772 count = NVRAM_TIMEOUT_COUNT;
8773 if (CHIP_REV_IS_SLOW(bp))
8774 count *= 100;
8775
8776 /* wait for completion */
8777 rc = -EBUSY;
8778 for (i = 0; i < count; i++) {
8779 udelay(5);
8780 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8781 if (val & MCPR_NVM_COMMAND_DONE) {
8782 rc = 0;
8783 break;
8784 }
8785 }
8786
8787 return rc;
8788}
8789
f1410647 8790#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
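/* Editor's note (worked example): BYTE_OFFSET(0x7) = 8 * (0x7 & 0x03) = 24,
 * so a single-byte write at offset 0x7 masks and replaces byte lane 3 of
 * the dword read back from aligned offset 0x4 in bnx2x_nvram_write1()
 * below.
 */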
8791
8792static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8793 int buf_size)
8794{
8795 int rc;
8796 u32 cmd_flags;
8797 u32 align_offset;
8798 u32 val;
8799
8800 if (offset + buf_size > bp->common.flash_size) {
8801 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8802 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8803 offset, buf_size, bp->common.flash_size);
8804 return -EINVAL;
8805 }
8806
8807 /* request access to nvram interface */
8808 rc = bnx2x_acquire_nvram_lock(bp);
8809 if (rc)
8810 return rc;
8811
8812 /* enable access to nvram interface */
8813 bnx2x_enable_nvram_access(bp);
8814
8815 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8816 align_offset = (offset & ~0x03);
8817 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8818
8819 if (rc == 0) {
8820 val &= ~(0xff << BYTE_OFFSET(offset));
8821 val |= (*data_buf << BYTE_OFFSET(offset));
8822
8823 /* nvram data is returned as an array of bytes
8824 * convert it back to cpu order */
8825 val = be32_to_cpu(val);
8826
8827 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8828 cmd_flags);
8829 }
8830
8831 /* disable access to nvram interface */
8832 bnx2x_disable_nvram_access(bp);
8833 bnx2x_release_nvram_lock(bp);
8834
8835 return rc;
8836}
8837
8838static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8839 int buf_size)
8840{
8841 int rc;
8842 u32 cmd_flags;
8843 u32 val;
8844 u32 written_so_far;
8845
34f80b04 8846 if (buf_size == 1) /* ethtool */
a2fbb9ea 8847 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8848
8849 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8850 DP(BNX2X_MSG_NVM,
c14423fe 8851 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8852 offset, buf_size);
8853 return -EINVAL;
8854 }
8855
8856 if (offset + buf_size > bp->common.flash_size) {
8857 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8858 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8859 offset, buf_size, bp->common.flash_size);
8860 return -EINVAL;
8861 }
8862
8863 /* request access to nvram interface */
8864 rc = bnx2x_acquire_nvram_lock(bp);
8865 if (rc)
8866 return rc;
8867
8868 /* enable access to nvram interface */
8869 bnx2x_enable_nvram_access(bp);
8870
8871 written_so_far = 0;
8872 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8873 while ((written_so_far < buf_size) && (rc == 0)) {
8874 if (written_so_far == (buf_size - sizeof(u32)))
8875 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8876 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8877 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8878 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8879 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8880
8881 memcpy(&val, data_buf, 4);
8882
8883 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8884
8885 /* advance to the next dword */
8886 offset += sizeof(u32);
8887 data_buf += sizeof(u32);
8888 written_so_far += sizeof(u32);
8889 cmd_flags = 0;
8890 }
8891
8892 /* disable access to nvram interface */
8893 bnx2x_disable_nvram_access(bp);
8894 bnx2x_release_nvram_lock(bp);
8895
8896 return rc;
8897}
8898
8899static int bnx2x_set_eeprom(struct net_device *dev,
8900 struct ethtool_eeprom *eeprom, u8 *eebuf)
8901{
8902 struct bnx2x *bp = netdev_priv(dev);
8903 int rc;
8904
8905 if (!netif_running(dev))
8906 return -EAGAIN;
8907
34f80b04 8908 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8909 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8910 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8911 eeprom->len, eeprom->len);
8912
8913 /* parameters already validated in ethtool_set_eeprom */
8914
c18487ee 8915 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8916 if (eeprom->magic == 0x00504859)
8917 if (bp->port.pmf) {
8918
4a37fb66 8919 bnx2x_acquire_phy_lock(bp);
8920 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8921 bp->link_params.ext_phy_config,
8922 (bp->state != BNX2X_STATE_CLOSED),
8923 eebuf, eeprom->len);
8924 if ((bp->state == BNX2X_STATE_OPEN) ||
8925 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 8926 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 8927 &bp->link_vars, 1);
8928 rc |= bnx2x_phy_init(&bp->link_params,
8929 &bp->link_vars);
bb2a0f7a 8930 }
4a37fb66 8931 bnx2x_release_phy_lock(bp);
8932
8933 } else /* Only the PMF can access the PHY */
8934 return -EINVAL;
8935 else
c18487ee 8936 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8937
8938 return rc;
8939}
8940
8941static int bnx2x_get_coalesce(struct net_device *dev,
8942 struct ethtool_coalesce *coal)
8943{
8944 struct bnx2x *bp = netdev_priv(dev);
8945
8946 memset(coal, 0, sizeof(struct ethtool_coalesce));
8947
8948 coal->rx_coalesce_usecs = bp->rx_ticks;
8949 coal->tx_coalesce_usecs = bp->tx_ticks;
8950
8951 return 0;
8952}
8953
8954static int bnx2x_set_coalesce(struct net_device *dev,
8955 struct ethtool_coalesce *coal)
8956{
8957 struct bnx2x *bp = netdev_priv(dev);
8958
8959 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8960 if (bp->rx_ticks > 3000)
8961 bp->rx_ticks = 3000;
8962
8963 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8964 if (bp->tx_ticks > 0x3000)
8965 bp->tx_ticks = 0x3000;
8966
34f80b04 8967 if (netif_running(dev))
8968 bnx2x_update_coalesce(bp);
8969
8970 return 0;
8971}
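/* Editor's note: the two clamps above use different bounds - rx at decimal
 * 3000, tx at hex 0x3000 (12288) - so "ethtool -C eth0 rx-usecs 5000
 * tx-usecs 5000" leaves rx_ticks at 3000 but tx_ticks at 5000; whether the
 * mixed radix is intentional is not clear from the source.
 */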
8972
8973static void bnx2x_get_ringparam(struct net_device *dev,
8974 struct ethtool_ringparam *ering)
8975{
8976 struct bnx2x *bp = netdev_priv(dev);
8977
8978 ering->rx_max_pending = MAX_RX_AVAIL;
8979 ering->rx_mini_max_pending = 0;
8980 ering->rx_jumbo_max_pending = 0;
8981
8982 ering->rx_pending = bp->rx_ring_size;
8983 ering->rx_mini_pending = 0;
8984 ering->rx_jumbo_pending = 0;
8985
8986 ering->tx_max_pending = MAX_TX_AVAIL;
8987 ering->tx_pending = bp->tx_ring_size;
8988}
8989
8990static int bnx2x_set_ringparam(struct net_device *dev,
8991 struct ethtool_ringparam *ering)
8992{
8993 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8994 int rc = 0;
8995
8996 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8997 (ering->tx_pending > MAX_TX_AVAIL) ||
8998 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8999 return -EINVAL;
9000
9001 bp->rx_ring_size = ering->rx_pending;
9002 bp->tx_ring_size = ering->tx_pending;
9003
9004 if (netif_running(dev)) {
9005 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9006 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9007 }
9008
34f80b04 9009 return rc;
9010}
9011
9012static void bnx2x_get_pauseparam(struct net_device *dev,
9013 struct ethtool_pauseparam *epause)
9014{
9015 struct bnx2x *bp = netdev_priv(dev);
9016
c0700f90 9017 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9018 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9019
9020 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9021 BNX2X_FLOW_CTRL_RX);
9022 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9023 BNX2X_FLOW_CTRL_TX);
9024
9025 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9026 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9027 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9028}
9029
9030static int bnx2x_set_pauseparam(struct net_device *dev,
9031 struct ethtool_pauseparam *epause)
9032{
9033 struct bnx2x *bp = netdev_priv(dev);
9034
9035 if (IS_E1HMF(bp))
9036 return 0;
9037
9038 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9039 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9040 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9041
c0700f90 9042 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9043
f1410647 9044 if (epause->rx_pause)
c0700f90 9045 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9046
f1410647 9047 if (epause->tx_pause)
c0700f90 9048 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9049
9050 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9051 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9052
c18487ee 9053 if (epause->autoneg) {
34f80b04 9054 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9055 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9056 return -EINVAL;
9057 }
a2fbb9ea 9058
c18487ee 9059 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9060 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9061 }
a2fbb9ea 9062
9063 DP(NETIF_MSG_LINK,
9064 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9065
9066 if (netif_running(dev)) {
bb2a0f7a 9067 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9068 bnx2x_link_set(bp);
9069 }
9070
9071 return 0;
9072}
9073
9074static int bnx2x_set_flags(struct net_device *dev, u32 data)
9075{
9076 struct bnx2x *bp = netdev_priv(dev);
9077 int changed = 0;
9078 int rc = 0;
9079
9080 /* TPA requires Rx CSUM offloading */
9081 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9082 if (!(dev->features & NETIF_F_LRO)) {
9083 dev->features |= NETIF_F_LRO;
9084 bp->flags |= TPA_ENABLE_FLAG;
9085 changed = 1;
9086 }
9087
9088 } else if (dev->features & NETIF_F_LRO) {
9089 dev->features &= ~NETIF_F_LRO;
9090 bp->flags &= ~TPA_ENABLE_FLAG;
9091 changed = 1;
9092 }
9093
9094 if (changed && netif_running(dev)) {
9095 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9096 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9097 }
9098
9099 return rc;
9100}
9101
9102static u32 bnx2x_get_rx_csum(struct net_device *dev)
9103{
9104 struct bnx2x *bp = netdev_priv(dev);
9105
9106 return bp->rx_csum;
9107}
9108
9109static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9110{
9111 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9112 int rc = 0;
9113
9114 bp->rx_csum = data;
9115
 9116	/* Disable TPA when Rx CSUM is disabled; otherwise all
 9117	   TPA'ed packets would be discarded due to a wrong TCP CSUM */
9118 if (!data) {
9119 u32 flags = ethtool_op_get_flags(dev);
9120
9121 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9122 }
9123
9124 return rc;
9125}
9126
9127static int bnx2x_set_tso(struct net_device *dev, u32 data)
9128{
755735eb 9129 if (data) {
a2fbb9ea 9130 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9131 dev->features |= NETIF_F_TSO6;
9132 } else {
a2fbb9ea 9133 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9134 dev->features &= ~NETIF_F_TSO6;
9135 }
9136
9137 return 0;
9138}
9139
f3c87cdd 9140static const struct {
9141 char string[ETH_GSTRING_LEN];
9142} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9143 { "register_test (offline)" },
9144 { "memory_test (offline)" },
9145 { "loopback_test (offline)" },
9146 { "nvram_test (online)" },
9147 { "interrupt_test (online)" },
9148 { "link_test (online)" },
d3d4f495 9149 { "idle check (online)" }
9150};
9151
9152static int bnx2x_self_test_count(struct net_device *dev)
9153{
9154 return BNX2X_NUM_TESTS;
9155}
9156
9157static int bnx2x_test_registers(struct bnx2x *bp)
9158{
9159 int idx, i, rc = -ENODEV;
9160 u32 wr_val = 0;
9dabc424 9161 int port = BP_PORT(bp);
9162 static const struct {
9163 u32 offset0;
9164 u32 offset1;
9165 u32 mask;
9166 } reg_tbl[] = {
9167/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9168 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9169 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9170 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9171 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9172 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9173 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9174 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9175 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9176 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9177/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9178 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9179 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9180 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9181 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9182 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9183 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9184 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9185 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9186 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9187/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9188 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9189 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9190 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9191 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9192 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9193 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9194 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9195 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9196 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9197/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9198 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9199 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9200 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9201 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9202 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9203 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9204 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9205
9206 { 0xffffffff, 0, 0x00000000 }
9207 };
9208
9209 if (!netif_running(bp->dev))
9210 return rc;
9211
9212 /* Repeat the test twice:
9213 First by writing 0x00000000, second by writing 0xffffffff */
9214 for (idx = 0; idx < 2; idx++) {
9215
9216 switch (idx) {
9217 case 0:
9218 wr_val = 0;
9219 break;
9220 case 1:
9221 wr_val = 0xffffffff;
9222 break;
9223 }
9224
9225 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9226 u32 offset, mask, save_val, val;
9227
9228 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9229 mask = reg_tbl[i].mask;
9230
9231 save_val = REG_RD(bp, offset);
9232
9233 REG_WR(bp, offset, wr_val);
9234 val = REG_RD(bp, offset);
9235
9236 /* Restore the original register's value */
9237 REG_WR(bp, offset, save_val);
9238
 9239			/* verify the value is as expected */
9240 if ((val & mask) != (wr_val & mask))
9241 goto test_reg_exit;
9242 }
9243 }
9244
9245 rc = 0;
9246
9247test_reg_exit:
9248 return rc;
9249}
9250
9251static int bnx2x_test_memory(struct bnx2x *bp)
9252{
9253 int i, j, rc = -ENODEV;
9254 u32 val;
9255 static const struct {
9256 u32 offset;
9257 int size;
9258 } mem_tbl[] = {
9259 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9260 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9261 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9262 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9263 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9264 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9265 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9266
9267 { 0xffffffff, 0 }
9268 };
9269 static const struct {
9270 char *name;
9271 u32 offset;
9272 u32 e1_mask;
9273 u32 e1h_mask;
f3c87cdd 9274 } prty_tbl[] = {
9275 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9276 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9277 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9278 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9279 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9280 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9281
9282 { NULL, 0xffffffff, 0, 0 }
9283 };
9284
9285 if (!netif_running(bp->dev))
9286 return rc;
9287
9288 /* Go through all the memories */
9289 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9290 for (j = 0; j < mem_tbl[i].size; j++)
9291 REG_RD(bp, mem_tbl[i].offset + j*4);
9292
9293 /* Check the parity status */
9294 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9295 val = REG_RD(bp, prty_tbl[i].offset);
9296 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9297 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9298 DP(NETIF_MSG_HW,
9299 "%s is 0x%x\n", prty_tbl[i].name, val);
9300 goto test_mem_exit;
9301 }
9302 }
9303
9304 rc = 0;
9305
9306test_mem_exit:
9307 return rc;
9308}
9309
9310static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9311{
9312 int cnt = 1000;
9313
9314 if (link_up)
9315 while (bnx2x_link_test(bp) && cnt--)
9316 msleep(10);
9317}
9318
9319static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9320{
9321 unsigned int pkt_size, num_pkts, i;
9322 struct sk_buff *skb;
9323 unsigned char *packet;
9324 struct bnx2x_fastpath *fp = &bp->fp[0];
9325 u16 tx_start_idx, tx_idx;
9326 u16 rx_start_idx, rx_idx;
9327 u16 pkt_prod;
9328 struct sw_tx_bd *tx_buf;
9329 struct eth_tx_bd *tx_bd;
9330 dma_addr_t mapping;
9331 union eth_rx_cqe *cqe;
9332 u8 cqe_fp_flags;
9333 struct sw_rx_bd *rx_buf;
9334 u16 len;
9335 int rc = -ENODEV;
9336
9337 /* check the loopback mode */
9338 switch (loopback_mode) {
9339 case BNX2X_PHY_LOOPBACK:
9340 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9341 return -EINVAL;
9342 break;
9343 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9344 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9345 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9346 break;
9347 default:
f3c87cdd 9348 return -EINVAL;
b5bf9068 9349 }
f3c87cdd 9350
9351 /* prepare the loopback packet */
9352 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9353 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9354 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9355 if (!skb) {
9356 rc = -ENOMEM;
9357 goto test_loopback_exit;
9358 }
9359 packet = skb_put(skb, pkt_size);
9360 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9361 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9362 for (i = ETH_HLEN; i < pkt_size; i++)
9363 packet[i] = (unsigned char) (i & 0xff);
9364
b5bf9068 9365 /* send the loopback packet */
9366 num_pkts = 0;
9367 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9368 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9369
9370 pkt_prod = fp->tx_pkt_prod++;
9371 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9372 tx_buf->first_bd = fp->tx_bd_prod;
9373 tx_buf->skb = skb;
9374
9375 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9376 mapping = pci_map_single(bp->pdev, skb->data,
9377 skb_headlen(skb), PCI_DMA_TODEVICE);
9378 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9379 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9380 tx_bd->nbd = cpu_to_le16(1);
9381 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9382 tx_bd->vlan = cpu_to_le16(pkt_prod);
9383 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9384 ETH_TX_BD_FLAGS_END_BD);
9385 tx_bd->general_data = ((UNICAST_ADDRESS <<
9386 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9387
9388 wmb();
9389
9390 fp->hw_tx_prods->bds_prod =
9391 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9392 mb(); /* FW restriction: must not reorder writing nbd and packets */
9393 fp->hw_tx_prods->packets_prod =
9394 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9395 DOORBELL(bp, FP_IDX(fp), 0);
9396
9397 mmiowb();
9398
9399 num_pkts++;
9400 fp->tx_bd_prod++;
9401 bp->dev->trans_start = jiffies;
9402
9403 udelay(100);
9404
9405 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9406 if (tx_idx != tx_start_idx + num_pkts)
9407 goto test_loopback_exit;
9408
9409 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9410 if (rx_idx != rx_start_idx + num_pkts)
9411 goto test_loopback_exit;
9412
9413 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9414 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9415 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9416 goto test_loopback_rx_exit;
9417
9418 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9419 if (len != pkt_size)
9420 goto test_loopback_rx_exit;
9421
9422 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9423 skb = rx_buf->skb;
9424 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9425 for (i = ETH_HLEN; i < pkt_size; i++)
9426 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9427 goto test_loopback_rx_exit;
9428
9429 rc = 0;
9430
9431test_loopback_rx_exit:
9432
9433 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9434 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9435 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9436 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9437
9438 /* Update producers */
9439 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9440 fp->rx_sge_prod);
9441
9442test_loopback_exit:
9443 bp->link_params.loopback_mode = LOOPBACK_NONE;
9444
9445 return rc;
9446}
9447
9448static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9449{
b5bf9068 9450 int rc = 0, res;
9451
9452 if (!netif_running(bp->dev))
9453 return BNX2X_LOOPBACK_FAILED;
9454
f8ef6e44 9455 bnx2x_netif_stop(bp, 1);
3910c8ae 9456 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9457
9458 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9459 if (res) {
9460 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9461 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9462 }
9463
9464 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9465 if (res) {
9466 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9467 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9468 }
9469
3910c8ae 9470 bnx2x_release_phy_lock(bp);
9471 bnx2x_netif_start(bp);
9472
9473 return rc;
9474}
9475
9476#define CRC32_RESIDUAL 0xdebb20e3
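/* Editor's note: 0xdebb20e3 is the standard CRC-32 residue: running the
 * CRC over a block that already carries its (complemented) CRC yields this
 * constant, so each NVRAM section below can be verified without knowing
 * where inside it the checksum field sits.
 */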
9477
9478static int bnx2x_test_nvram(struct bnx2x *bp)
9479{
9480 static const struct {
9481 int offset;
9482 int size;
9483 } nvram_tbl[] = {
9484 { 0, 0x14 }, /* bootstrap */
9485 { 0x14, 0xec }, /* dir */
9486 { 0x100, 0x350 }, /* manuf_info */
9487 { 0x450, 0xf0 }, /* feature_info */
9488 { 0x640, 0x64 }, /* upgrade_key_info */
9489 { 0x6a4, 0x64 },
9490 { 0x708, 0x70 }, /* manuf_key_info */
9491 { 0x778, 0x70 },
9492 { 0, 0 }
9493 };
9494 u32 buf[0x350 / 4];
9495 u8 *data = (u8 *)buf;
9496 int i, rc;
9497 u32 magic, csum;
9498
9499 rc = bnx2x_nvram_read(bp, 0, data, 4);
9500 if (rc) {
9501 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9502 goto test_nvram_exit;
9503 }
9504
9505 magic = be32_to_cpu(buf[0]);
9506 if (magic != 0x669955aa) {
9507 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9508 rc = -ENODEV;
9509 goto test_nvram_exit;
9510 }
9511
9512 for (i = 0; nvram_tbl[i].size; i++) {
9513
9514 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9515 nvram_tbl[i].size);
9516 if (rc) {
9517 DP(NETIF_MSG_PROBE,
9518 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9519 goto test_nvram_exit;
9520 }
9521
9522 csum = ether_crc_le(nvram_tbl[i].size, data);
9523 if (csum != CRC32_RESIDUAL) {
9524 DP(NETIF_MSG_PROBE,
9525 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9526 rc = -ENODEV;
9527 goto test_nvram_exit;
9528 }
9529 }
9530
9531test_nvram_exit:
9532 return rc;
9533}
9534
9535static int bnx2x_test_intr(struct bnx2x *bp)
9536{
9537 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9538 int i, rc;
9539
9540 if (!netif_running(bp->dev))
9541 return -ENODEV;
9542
8d9c5f34 9543 config->hdr.length = 0;
9544 if (CHIP_IS_E1(bp))
9545 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9546 else
9547 config->hdr.offset = BP_FUNC(bp);
9548 config->hdr.client_id = BP_CL_ID(bp);
9549 config->hdr.reserved1 = 0;
9550
9551 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9552 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9553 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9554 if (rc == 0) {
9555 bp->set_mac_pending++;
9556 for (i = 0; i < 10; i++) {
9557 if (!bp->set_mac_pending)
9558 break;
9559 msleep_interruptible(10);
9560 }
9561 if (i == 10)
9562 rc = -ENODEV;
9563 }
9564
9565 return rc;
9566}
9567
9568static void bnx2x_self_test(struct net_device *dev,
9569 struct ethtool_test *etest, u64 *buf)
9570{
9571 struct bnx2x *bp = netdev_priv(dev);
9572
9573 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9574
f3c87cdd 9575 if (!netif_running(dev))
a2fbb9ea 9576 return;
a2fbb9ea 9577
33471629 9578 /* offline tests are not supported in MF mode */
9579 if (IS_E1HMF(bp))
9580 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9581
9582 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9583 u8 link_up;
9584
9585 link_up = bp->link_vars.link_up;
9586 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9587 bnx2x_nic_load(bp, LOAD_DIAG);
9588 /* wait until link state is restored */
9589 bnx2x_wait_for_link(bp, link_up);
9590
9591 if (bnx2x_test_registers(bp) != 0) {
9592 buf[0] = 1;
9593 etest->flags |= ETH_TEST_FL_FAILED;
9594 }
9595 if (bnx2x_test_memory(bp) != 0) {
9596 buf[1] = 1;
9597 etest->flags |= ETH_TEST_FL_FAILED;
9598 }
9599 buf[2] = bnx2x_test_loopback(bp, link_up);
9600 if (buf[2] != 0)
9601 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9602
9603 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9604 bnx2x_nic_load(bp, LOAD_NORMAL);
9605 /* wait until link state is restored */
9606 bnx2x_wait_for_link(bp, link_up);
9607 }
9608 if (bnx2x_test_nvram(bp) != 0) {
9609 buf[3] = 1;
9610 etest->flags |= ETH_TEST_FL_FAILED;
9611 }
9612 if (bnx2x_test_intr(bp) != 0) {
9613 buf[4] = 1;
9614 etest->flags |= ETH_TEST_FL_FAILED;
9615 }
9616 if (bp->port.pmf)
9617 if (bnx2x_link_test(bp) != 0) {
9618 buf[5] = 1;
9619 etest->flags |= ETH_TEST_FL_FAILED;
9620 }
9621
9622#ifdef BNX2X_EXTRA_DEBUG
9623 bnx2x_panic_dump(bp);
9624#endif
9625}
9626
9627static const struct {
9628 long offset;
9629 int size;
9630 u8 string[ETH_GSTRING_LEN];
9631} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9632/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9633 { Q_STATS_OFFSET32(error_bytes_received_hi),
9634 8, "[%d]: rx_error_bytes" },
9635 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9636 8, "[%d]: rx_ucast_packets" },
9637 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9638 8, "[%d]: rx_mcast_packets" },
9639 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9640 8, "[%d]: rx_bcast_packets" },
9641 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9642 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9643 4, "[%d]: rx_phy_ip_err_discards"},
9644 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9645 4, "[%d]: rx_skb_alloc_discard" },
9646 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9647
9648/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9649 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9650 8, "[%d]: tx_packets" }
9651};
9652
9653static const struct {
9654 long offset;
9655 int size;
9656 u32 flags;
9657#define STATS_FLAGS_PORT 1
9658#define STATS_FLAGS_FUNC 2
de832a55 9659#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9660 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9661} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9662/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9663 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9664 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9665 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9666 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9667 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9668 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9669 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9670 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9671 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9672 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9673 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9674 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9675 8, STATS_FLAGS_PORT, "rx_align_errors" },
9676 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9677 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9678 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9679 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9680/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9681 8, STATS_FLAGS_PORT, "rx_fragments" },
9682 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9683 8, STATS_FLAGS_PORT, "rx_jabbers" },
9684 { STATS_OFFSET32(no_buff_discard_hi),
9685 8, STATS_FLAGS_BOTH, "rx_discards" },
9686 { STATS_OFFSET32(mac_filter_discard),
9687 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9688 { STATS_OFFSET32(xxoverflow_discard),
9689 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9690 { STATS_OFFSET32(brb_drop_hi),
9691 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9692 { STATS_OFFSET32(brb_truncate_hi),
9693 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9694 { STATS_OFFSET32(pause_frames_received_hi),
9695 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9696 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9697 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9698 { STATS_OFFSET32(nig_timer_max),
9699 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9700/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9701 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9702 { STATS_OFFSET32(rx_skb_alloc_failed),
9703 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9704 { STATS_OFFSET32(hw_csum_err),
9705 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9706
9707 { STATS_OFFSET32(total_bytes_transmitted_hi),
9708 8, STATS_FLAGS_BOTH, "tx_bytes" },
9709 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9710 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9711 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9712 8, STATS_FLAGS_BOTH, "tx_packets" },
9713 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9714 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9715 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9716 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9717 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9718 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9719 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9720 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9721/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9722 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9723 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9724 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9725 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9726 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9727 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9728 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9729 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9730 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9731 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9732 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9733 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9734 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9735 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9736 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9737 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9738 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9739 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9740 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9741/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9742 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9743 { STATS_OFFSET32(pause_frames_sent_hi),
9744 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9745};
9746
9747#define IS_PORT_STAT(i) \
9748 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9749#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9750#define IS_E1HMF_MODE_STAT(bp) \
9751 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9752
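/* Editor's note (illustrative): under these macros a STATS_FLAGS_BOTH
 * counter such as "rx_bytes" is always reported, while a PORT-only counter
 * such as "rx_crc_errors" is suppressed in E1H multi-function mode unless
 * BNX2X_MSG_STATS is set in bp->msglevel.
 */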
9753static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9754{
bb2a0f7a 9755 struct bnx2x *bp = netdev_priv(dev);
de832a55 9756 int i, j, k;
bb2a0f7a 9757
9758 switch (stringset) {
9759 case ETH_SS_STATS:
9760 if (is_multi(bp)) {
9761 k = 0;
9762 for_each_queue(bp, i) {
9763 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9764 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9765 bnx2x_q_stats_arr[j].string, i);
9766 k += BNX2X_NUM_Q_STATS;
9767 }
9768 if (IS_E1HMF_MODE_STAT(bp))
9769 break;
9770 for (j = 0; j < BNX2X_NUM_STATS; j++)
9771 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9772 bnx2x_stats_arr[j].string);
9773 } else {
9774 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9775 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9776 continue;
9777 strcpy(buf + j*ETH_GSTRING_LEN,
9778 bnx2x_stats_arr[i].string);
9779 j++;
9780 }
bb2a0f7a 9781 }
9782 break;
9783
9784 case ETH_SS_TEST:
9785 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9786 break;
9787 }
9788}
9789
9790static int bnx2x_get_stats_count(struct net_device *dev)
9791{
bb2a0f7a 9792 struct bnx2x *bp = netdev_priv(dev);
de832a55 9793 int i, num_stats;
bb2a0f7a 9794
9795 if (is_multi(bp)) {
9796 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9797 if (!IS_E1HMF_MODE_STAT(bp))
9798 num_stats += BNX2X_NUM_STATS;
9799 } else {
9800 if (IS_E1HMF_MODE_STAT(bp)) {
9801 num_stats = 0;
9802 for (i = 0; i < BNX2X_NUM_STATS; i++)
9803 if (IS_FUNC_STAT(i))
9804 num_stats++;
9805 } else
9806 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9807 }
de832a55 9808
bb2a0f7a 9809 return num_stats;
9810}
9811
9812static void bnx2x_get_ethtool_stats(struct net_device *dev,
9813 struct ethtool_stats *stats, u64 *buf)
9814{
9815 struct bnx2x *bp = netdev_priv(dev);
9816 u32 *hw_stats, *offset;
9817 int i, j, k;
bb2a0f7a 9818
9819 if (is_multi(bp)) {
9820 k = 0;
9821 for_each_queue(bp, i) {
9822 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9823 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9824 if (bnx2x_q_stats_arr[j].size == 0) {
9825 /* skip this counter */
9826 buf[k + j] = 0;
9827 continue;
9828 }
9829 offset = (hw_stats +
9830 bnx2x_q_stats_arr[j].offset);
9831 if (bnx2x_q_stats_arr[j].size == 4) {
9832 /* 4-byte counter */
9833 buf[k + j] = (u64) *offset;
9834 continue;
9835 }
9836 /* 8-byte counter */
9837 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9838 }
9839 k += BNX2X_NUM_Q_STATS;
9840 }
9841 if (IS_E1HMF_MODE_STAT(bp))
9842 return;
9843 hw_stats = (u32 *)&bp->eth_stats;
9844 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9845 if (bnx2x_stats_arr[j].size == 0) {
9846 /* skip this counter */
9847 buf[k + j] = 0;
9848 continue;
9849 }
9850 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9851 if (bnx2x_stats_arr[j].size == 4) {
9852 /* 4-byte counter */
9853 buf[k + j] = (u64) *offset;
9854 continue;
9855 }
9856 /* 8-byte counter */
9857 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9858 }
9859 } else {
9860 hw_stats = (u32 *)&bp->eth_stats;
9861 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9862 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9863 continue;
9864 if (bnx2x_stats_arr[i].size == 0) {
9865 /* skip this counter */
9866 buf[j] = 0;
9867 j++;
9868 continue;
9869 }
9870 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9871 if (bnx2x_stats_arr[i].size == 4) {
9872 /* 4-byte counter */
9873 buf[j] = (u64) *offset;
9874 j++;
9875 continue;
9876 }
9877 /* 8-byte counter */
9878 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9879 j++;
a2fbb9ea 9880 }
9881 }
9882}
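/* Editor's note: 8-byte counters are laid out as a high dword at 'offset'
 * followed by the low dword; assuming HILO_U64(hi, lo) expands to
 * (((u64)hi << 32) | lo), as its name suggests, the two reads above
 * reassemble one 64-bit value.
 */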
9883
9884static int bnx2x_phys_id(struct net_device *dev, u32 data)
9885{
9886 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9887 int port = BP_PORT(bp);
9888 int i;
9889
9890 if (!netif_running(dev))
9891 return 0;
9892
9893 if (!bp->port.pmf)
9894 return 0;
9895
9896 if (data == 0)
9897 data = 2;
9898
9899 for (i = 0; i < (data * 2); i++) {
c18487ee 9900 if ((i % 2) == 0)
34f80b04 9901 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9902 bp->link_params.hw_led_mode,
9903 bp->link_params.chip_id);
9904 else
34f80b04 9905 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9906 bp->link_params.hw_led_mode,
9907 bp->link_params.chip_id);
9908
9909 msleep_interruptible(500);
9910 if (signal_pending(current))
9911 break;
9912 }
9913
c18487ee 9914 if (bp->link_vars.link_up)
34f80b04 9915 bnx2x_set_led(bp, port, LED_MODE_OPER,
9916 bp->link_vars.line_speed,
9917 bp->link_params.hw_led_mode,
9918 bp->link_params.chip_id);
9919
9920 return 0;
9921}
9922
9923static struct ethtool_ops bnx2x_ethtool_ops = {
9924 .get_settings = bnx2x_get_settings,
9925 .set_settings = bnx2x_set_settings,
9926 .get_drvinfo = bnx2x_get_drvinfo,
9927 .get_wol = bnx2x_get_wol,
9928 .set_wol = bnx2x_set_wol,
9929 .get_msglevel = bnx2x_get_msglevel,
9930 .set_msglevel = bnx2x_set_msglevel,
9931 .nway_reset = bnx2x_nway_reset,
9932 .get_link = ethtool_op_get_link,
9933 .get_eeprom_len = bnx2x_get_eeprom_len,
9934 .get_eeprom = bnx2x_get_eeprom,
9935 .set_eeprom = bnx2x_set_eeprom,
9936 .get_coalesce = bnx2x_get_coalesce,
9937 .set_coalesce = bnx2x_set_coalesce,
9938 .get_ringparam = bnx2x_get_ringparam,
9939 .set_ringparam = bnx2x_set_ringparam,
9940 .get_pauseparam = bnx2x_get_pauseparam,
9941 .set_pauseparam = bnx2x_set_pauseparam,
9942 .get_rx_csum = bnx2x_get_rx_csum,
9943 .set_rx_csum = bnx2x_set_rx_csum,
9944 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9945 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9946 .set_flags = bnx2x_set_flags,
9947 .get_flags = ethtool_op_get_flags,
9948 .get_sg = ethtool_op_get_sg,
9949 .set_sg = ethtool_op_set_sg,
9950 .get_tso = ethtool_op_get_tso,
9951 .set_tso = bnx2x_set_tso,
9952 .self_test_count = bnx2x_self_test_count,
9953 .self_test = bnx2x_self_test,
9954 .get_strings = bnx2x_get_strings,
9955 .phys_id = bnx2x_phys_id,
9956 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9957 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9958};
9959
9960/* end of ethtool_ops */
9961
9962/****************************************************************************
9963* General service functions
9964****************************************************************************/
9965
9966static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9967{
9968 u16 pmcsr;
9969
9970 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9971
9972 switch (state) {
9973 case PCI_D0:
34f80b04 9974 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9975 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9976 PCI_PM_CTRL_PME_STATUS));
9977
9978 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9979 /* delay required during transition out of D3hot */
a2fbb9ea 9980 msleep(20);
34f80b04 9981 break;
a2fbb9ea 9982
9983 case PCI_D3hot:
9984 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
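		/* 3 is the D3hot encoding of the PM control state field */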
9985 pmcsr |= 3;
a2fbb9ea 9986
9987 if (bp->wol)
9988 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9989
9990 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9991 pmcsr);
a2fbb9ea 9992
9993 /* No more memory access after this point until
9994 * device is brought back to D0.
9995 */
9996 break;
9997
9998 default:
9999 return -EINVAL;
10000 }
10001 return 0;
10002}
10003
10004static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10005{
10006 u16 rx_cons_sb;
10007
10008 /* Tell compiler that status block fields can change */
10009 barrier();
10010 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10011 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10012 rx_cons_sb++;
10013 return (fp->rx_comp_cons != rx_cons_sb);
10014}
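/* Editor's note: the increment above steps over the last slot of an RCQ
 * page - when the consumer index lands on MAX_RCQ_DESC_CNT that slot is a
 * "next page" link rather than a real completion, so the next valid entry
 * is one further on.
 */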
10015
10016/*
10017 * net_device service functions
10018 */
10019
10020static int bnx2x_poll(struct napi_struct *napi, int budget)
10021{
10022 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10023 napi);
10024 struct bnx2x *bp = fp->bp;
10025 int work_done = 0;
10026
10027#ifdef BNX2X_STOP_ON_ERROR
10028 if (unlikely(bp->panic))
34f80b04 10029 goto poll_panic;
10030#endif
10031
10032 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10033 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10034 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10035
10036 bnx2x_update_fpsb_idx(fp);
10037
237907c1 10038 if (bnx2x_has_tx_work(fp))
10039 bnx2x_tx_int(fp, budget);
10040
237907c1 10041 if (bnx2x_has_rx_work(fp))
a2fbb9ea 10042 work_done = bnx2x_rx_int(fp, budget);
da5a662a 10043 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10044
10045 /* must not complete if we consumed full budget */
da5a662a 10046 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10047
10048#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10049poll_panic:
a2fbb9ea 10050#endif
288379f0 10051 napi_complete(napi);
a2fbb9ea 10052
34f80b04 10053 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 10054 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 10055 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
10056 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10057 }
10058 return work_done;
10059}
10060
10061
10062/* we split the first BD into headers and data BDs
33471629 10063 * to ease the pain of our fellow microcode engineers
10064 * we use one mapping for both BDs
10065 * So far this has only been observed to happen
10066 * in Other Operating Systems(TM)
10067 */
10068static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10069 struct bnx2x_fastpath *fp,
10070 struct eth_tx_bd **tx_bd, u16 hlen,
10071 u16 bd_prod, int nbd)
10072{
10073 struct eth_tx_bd *h_tx_bd = *tx_bd;
10074 struct eth_tx_bd *d_tx_bd;
10075 dma_addr_t mapping;
10076 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10077
10078 /* first fix first BD */
10079 h_tx_bd->nbd = cpu_to_le16(nbd);
10080 h_tx_bd->nbytes = cpu_to_le16(hlen);
10081
10082 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10083 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10084 h_tx_bd->addr_lo, h_tx_bd->nbd);
10085
10086 /* now get a new data BD
10087 * (after the pbd) and fill it */
10088 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10089 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10090
10091 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10092 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10093
10094 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10095 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10096 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10097 d_tx_bd->vlan = 0;
10098 /* this marks the BD as one that has no individual mapping
10099 * the FW ignores this flag in a BD not marked start
10100 */
10101 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10102 DP(NETIF_MSG_TX_QUEUED,
10103 "TSO split data size is %d (%x:%x)\n",
10104 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10105
10106 /* update tx_bd for marking the last BD flag */
10107 *tx_bd = d_tx_bd;
10108
10109 return bd_prod;
10110}
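/* Editor's note (worked example): for a TSO packet with 54 bytes of
 * Ethernet/IP/TCP headers (hlen = 54) and a 1500-byte linear part, the
 * split above shrinks the start BD to bytes 0-53 and adds a data BD for
 * the remaining 1446 bytes at mapping + 54; both BDs share one DMA
 * mapping, which is therefore unmapped only once at completion.
 */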
10111
10112static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10113{
10114 if (fix > 0)
10115 csum = (u16) ~csum_fold(csum_sub(csum,
10116 csum_partial(t_header - fix, fix, 0)));
10117
10118 else if (fix < 0)
10119 csum = (u16) ~csum_fold(csum_add(csum,
10120 csum_partial(t_header, -fix, 0)));
10121
10122 return swab16(csum);
10123}
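/* Editor's note: the helper above compensates a partial checksum that was
 * computed over 'fix' too many (fix > 0) or too few (fix < 0) bytes
 * relative to the transport header - the partial sum of those bytes is
 * subtracted or added back - and byte-swaps the 16-bit result into the
 * order the parsing BD expects.
 */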
10124
10125static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10126{
10127 u32 rc;
10128
10129 if (skb->ip_summed != CHECKSUM_PARTIAL)
10130 rc = XMIT_PLAIN;
10131
10132 else {
10133 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10134 rc = XMIT_CSUM_V6;
10135 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10136 rc |= XMIT_CSUM_TCP;
10137
10138 } else {
10139 rc = XMIT_CSUM_V4;
10140 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10141 rc |= XMIT_CSUM_TCP;
10142 }
10143 }
10144
10145 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10146 rc |= XMIT_GSO_V4;
10147
10148 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10149 rc |= XMIT_GSO_V6;
10150
10151 return rc;
10152}
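/* Editor's note (worked example): a CHECKSUM_PARTIAL TCPv4 frame with
 * SKB_GSO_TCPV4 set classifies as XMIT_CSUM_V4 | XMIT_CSUM_TCP |
 * XMIT_GSO_V4; a frame with no checksum offload requested is XMIT_PLAIN.
 */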
10153
632da4d6 10154#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10155/* check if packet requires linearization (packet is too fragmented) */
10156static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10157 u32 xmit_type)
10158{
10159 int to_copy = 0;
10160 int hlen = 0;
10161 int first_bd_sz = 0;
10162
10163 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10164 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10165
10166 if (xmit_type & XMIT_GSO) {
10167 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10168 /* Check if LSO packet needs to be copied:
10169 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10170 int wnd_size = MAX_FETCH_BD - 3;
33471629 10171 /* Number of windows to check */
10172 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10173 int wnd_idx = 0;
10174 int frag_idx = 0;
10175 u32 wnd_sum = 0;
10176
10177 /* Headers length */
10178 hlen = (int)(skb_transport_header(skb) - skb->data) +
10179 tcp_hdrlen(skb);
10180
10181 /* Amount of data (w/o headers) on linear part of SKB*/
10182 first_bd_sz = skb_headlen(skb) - hlen;
10183
10184 wnd_sum = first_bd_sz;
10185
10186 /* Calculate the first sum - it's special */
10187 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10188 wnd_sum +=
10189 skb_shinfo(skb)->frags[frag_idx].size;
10190
10191 /* If there was data on linear skb data - check it */
10192 if (first_bd_sz > 0) {
10193 if (unlikely(wnd_sum < lso_mss)) {
10194 to_copy = 1;
10195 goto exit_lbl;
10196 }
10197
10198 wnd_sum -= first_bd_sz;
10199 }
10200
10201 /* Others are easier: run through the frag list and
10202 check all windows */
10203 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10204 wnd_sum +=
10205 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10206
10207 if (unlikely(wnd_sum < lso_mss)) {
10208 to_copy = 1;
10209 break;
10210 }
10211 wnd_sum -=
10212 skb_shinfo(skb)->frags[wnd_idx].size;
10213 }
10214
10215 } else {
10216 /* in non-LSO too fragmented packet should always
10217 be linearized */
10218 to_copy = 1;
10219 }
10220 }
10221
10222exit_lbl:
10223 if (unlikely(to_copy))
10224 DP(NETIF_MSG_TX_QUEUED,
10225 "Linearization IS REQUIRED for %s packet. "
10226 "num_frags %d hlen %d first_bd_sz %d\n",
10227 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10228 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10229
10230 return to_copy;
10231}
632da4d6 10232#endif
755735eb
EG
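
/* Worked example of the sliding-window check above (illustrative): if
 * MAX_FETCH_BD is 13, the FW fetches at most wnd_size == 10 BDs per
 * window.  For an LSO skb with gso_size == 1400, every run of 10
 * consecutive BDs (linear part plus frags) must cover at least 1400
 * bytes; if any window sums to less, the skb is linearized first.  The
 * loop keeps a running sum: add the frag entering the window, compare
 * against lso_mss, then subtract the frag leaving it.
 */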

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_fastpath *fp;
        struct netdev_queue *txq;
        struct sw_tx_bd *tx_buf;
        struct eth_tx_bd *tx_bd;
        struct eth_tx_parse_bd *pbd = NULL;
        u16 pkt_prod, bd_prod;
        int nbd, fp_index;
        dma_addr_t mapping;
        u32 xmit_type = bnx2x_xmit_type(bp, skb);
        int vlan_off = (bp->e1hov ? 4 : 0);
        int i;
        u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return NETDEV_TX_BUSY;
#endif

        fp_index = skb_get_queue_mapping(skb);
        txq = netdev_get_tx_queue(dev, fp_index);

        fp = &bp->fp[fp_index];

        if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
                fp->eth_q_stats.driver_xoff++;
                netif_tx_stop_queue(txq);
                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

        DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
           "  gso type %x  xmit_type %x\n",
           skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
        /* First, check if we need to linearize the skb
           (due to FW restrictions) */
        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
                /* Statistics of linearization */
                bp->lin_cnt++;
                if (skb_linearize(skb) != 0) {
                        DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
                           "silently dropping this SKB\n");
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
        }
#endif

        /*
        Please read carefully. First we use one BD which we mark as start,
        then for TSO or xsum we have a parsing info BD,
        and only then we have the rest of the TSO BDs.
        (don't forget to mark the last one as last,
        and to unmap only AFTER you write to the BD ...)
        And above all, all pbd sizes are in words - NOT DWORDS!
        */

        pkt_prod = fp->tx_pkt_prod++;
        bd_prod = TX_BD(fp->tx_bd_prod);

        /* get a tx_buf and first BD */
        tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
        tx_bd = &fp->tx_desc_ring[bd_prod];

        tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
        tx_bd->general_data = (UNICAST_ADDRESS <<
                               ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
        /* header nbd */
        tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

        /* remember the first BD of the packet */
        tx_buf->first_bd = fp->tx_bd_prod;
        tx_buf->skb = skb;

        DP(NETIF_MSG_TX_QUEUED,
           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
           pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
        if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
            (bp->flags & HW_VLAN_TX_FLAG)) {
                tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
                tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
                vlan_off += 4;
        } else
#endif
                tx_bd->vlan = cpu_to_le16(pkt_prod);

        if (xmit_type) {
                /* turn on parsing and get a BD */
                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
                pbd = (void *)&fp->tx_desc_ring[bd_prod];

                memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
        }

        if (xmit_type & XMIT_CSUM) {
                hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

                /* for now NS flag is not used in Linux */
                pbd->global_data = (hlen |
                                    ((skb->protocol == htons(ETH_P_8021Q)) <<
                                     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

                pbd->ip_hlen = (skb_transport_header(skb) -
                                skb_network_header(skb)) / 2;

                hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

                pbd->total_hlen = cpu_to_le16(hlen);
                hlen = hlen*2 - vlan_off;

                tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

                if (xmit_type & XMIT_CSUM_V4)
                        tx_bd->bd_flags.as_bitfield |=
                                                ETH_TX_BD_FLAGS_IP_CSUM;
                else
                        tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

                if (xmit_type & XMIT_CSUM_TCP) {
                        pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

                } else {
                        s8 fix = SKB_CS_OFF(skb); /* signed! */

                        pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
                        pbd->cs_offset = fix / 2;

                        DP(NETIF_MSG_TX_QUEUED,
                           "hlen %d  offset %d  fix %d  csum before fix %x\n",
                           le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
                           SKB_CS(skb));

                        /* HW bug: fixup the CSUM */
                        pbd->tcp_pseudo_csum =
                                bnx2x_csum_fix(skb_transport_header(skb),
                                               SKB_CS(skb), fix);

                        DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
                           pbd->tcp_pseudo_csum);
                }
        }

        mapping = pci_map_single(bp->pdev, skb->data,
                                 skb_headlen(skb), PCI_DMA_TODEVICE);

        tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
        nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
        tx_bd->nbd = cpu_to_le16(nbd);
        tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

        DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
           "  nbytes %d  flags %x  vlan %x\n",
           tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
           le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
           le16_to_cpu(tx_bd->vlan));

        if (xmit_type & XMIT_GSO) {

                DP(NETIF_MSG_TX_QUEUED,
                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
                   skb->len, hlen, skb_headlen(skb),
                   skb_shinfo(skb)->gso_size);

                tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

                if (unlikely(skb_headlen(skb) > hlen))
                        bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
                                                 bd_prod, ++nbd);

                pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
                pbd->tcp_flags = pbd_tcp_flags(skb);

                if (xmit_type & XMIT_GSO_V4) {
                        pbd->ip_id = swab16(ip_hdr(skb)->id);
                        pbd->tcp_pseudo_csum =
                                swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                          ip_hdr(skb)->daddr,
                                                          0, IPPROTO_TCP, 0));

                } else
                        pbd->tcp_pseudo_csum =
                                swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                        &ipv6_hdr(skb)->daddr,
                                                        0, IPPROTO_TCP, 0));

                pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
                tx_bd = &fp->tx_desc_ring[bd_prod];

                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);

                tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
                tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
                tx_bd->nbytes = cpu_to_le16(frag->size);
                tx_bd->vlan = cpu_to_le16(pkt_prod);
                tx_bd->bd_flags.as_bitfield = 0;

                DP(NETIF_MSG_TX_QUEUED,
                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
                   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
                   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
        }

        /* now at last mark the BD as the last BD */
        tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

        DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
           tx_bd, tx_bd->bd_flags.as_bitfield);

        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

        /* now send a tx doorbell, counting the next BD
         * if the packet contains or ends with it
         */
        if (TX_BD_POFF(bd_prod) < nbd)
                nbd++;

        if (pbd)
                DP(NETIF_MSG_TX_QUEUED,
                   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
                   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
                   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
                   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
                   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

        /*
         * Make sure that the BD data is updated before updating the producer
         * since FW might read the BD right after the producer is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW will
         * assume packets must have BDs.
         */
        wmb();

        fp->hw_tx_prods->bds_prod =
                cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
        mb(); /* FW restriction: must not reorder writing nbd and packets */
        fp->hw_tx_prods->packets_prod =
                cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
        DOORBELL(bp, FP_IDX(fp), 0);

        mmiowb();

        fp->tx_bd_prod += nbd;
        dev->trans_start = jiffies;

        if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
                /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
                   if we put Tx into XOFF state. */
                smp_mb();
                netif_tx_stop_queue(txq);
                fp->eth_q_stats.driver_xoff++;
                if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
                        netif_tx_wake_queue(txq);
        }
        fp->tx_pkt++;

        return NETDEV_TX_OK;
}

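/* Note on the producer/doorbell ordering above: the wmb() guarantees the
 * BD writes in host memory are visible before the BD producer is
 * advanced; the mb() keeps the bds_prod and packets_prod updates in
 * order (a FW restriction); and mmiowb() keeps the doorbell write
 * ordered on platforms where MMIO writes can be reordered.  Dropping any
 * of the three can let the chip fetch a stale BD.
 */
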
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);

        netif_carrier_off(dev);

        bnx2x_set_power_state(bp, PCI_D0);

        return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);

        /* Unload the driver, release IRQs */
        bnx2x_nic_unload(bp, UNLOAD_CLOSE);
        if (atomic_read(&bp->pdev->enable_cnt) == 1)
                if (!CHIP_REV_IS_SLOW(bp))
                        bnx2x_set_power_state(bp, PCI_D3hot);

        return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        u32 rx_mode = BNX2X_RX_MODE_NORMAL;
        int port = BP_PORT(bp);

        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return;
        }

        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

        if (dev->flags & IFF_PROMISC)
                rx_mode = BNX2X_RX_MODE_PROMISC;

        else if ((dev->flags & IFF_ALLMULTI) ||
                 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
                rx_mode = BNX2X_RX_MODE_ALLMULTI;

        else { /* some multicasts */
                if (CHIP_IS_E1(bp)) {
                        int i, old, offset;
                        struct dev_mc_list *mclist;
                        struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                        for (i = 0, mclist = dev->mc_list;
                             mclist && (i < dev->mc_count);
                             i++, mclist = mclist->next) {

                                config->config_table[i].
                                        cam_entry.msb_mac_addr =
                                        swab16(*(u16 *)&mclist->dmi_addr[0]);
                                config->config_table[i].
                                        cam_entry.middle_mac_addr =
                                        swab16(*(u16 *)&mclist->dmi_addr[2]);
                                config->config_table[i].
                                        cam_entry.lsb_mac_addr =
                                        swab16(*(u16 *)&mclist->dmi_addr[4]);
                                config->config_table[i].cam_entry.flags =
                                                        cpu_to_le16(port);
                                config->config_table[i].
                                        target_table_entry.flags = 0;
                                config->config_table[i].
                                        target_table_entry.client_id = 0;
                                config->config_table[i].
                                        target_table_entry.vlan_id = 0;

                                DP(NETIF_MSG_IFUP,
                                   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
                                   config->config_table[i].
                                                cam_entry.msb_mac_addr,
                                   config->config_table[i].
                                                cam_entry.middle_mac_addr,
                                   config->config_table[i].
                                                cam_entry.lsb_mac_addr);
                        }
                        old = config->hdr.length;
                        if (old > i) {
                                for (; i < old; i++) {
                                        if (CAM_IS_INVALID(config->
                                                           config_table[i])) {
                                                /* already invalidated */
                                                break;
                                        }
                                        /* invalidate */
                                        CAM_INVALIDATE(config->
                                                       config_table[i]);
                                }
                        }

                        if (CHIP_REV_IS_SLOW(bp))
                                offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
                        else
                                offset = BNX2X_MAX_MULTICAST*(1 + port);

                        config->hdr.length = i;
                        config->hdr.offset = offset;
                        config->hdr.client_id = bp->fp->cl_id;
                        config->hdr.reserved1 = 0;

                        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                                   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
                                   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
                                      0);
                } else { /* E1H */
                        /* Accept one or more multicasts */
                        struct dev_mc_list *mclist;
                        u32 mc_filter[MC_HASH_SIZE];
                        u32 crc, bit, regidx;
                        int i;

                        memset(mc_filter, 0, 4 * MC_HASH_SIZE);

                        for (i = 0, mclist = dev->mc_list;
                             mclist && (i < dev->mc_count);
                             i++, mclist = mclist->next) {

                                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                                   mclist->dmi_addr);

                                crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
                                bit = (crc >> 24) & 0xff;
                                regidx = bit >> 5;
                                bit &= 0x1f;
                                mc_filter[regidx] |= (1 << bit);
                        }

                        for (i = 0; i < MC_HASH_SIZE; i++)
                                REG_WR(bp, MC_HASH_OFFSET(bp, i),
                                       mc_filter[i]);
                }
        }

        bp->rx_mode = rx_mode;
        bnx2x_set_storm_rx_mode(bp);
}

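/* Worked example of the E1H multicast hash above (illustrative): the top
 * byte of the little-endian CRC32C of the 6-byte MAC selects one of 256
 * filter bits.  If crc32c_le() yields 0x9Axxxxxx, then bit = 0x9A (154),
 * regidx = 154 >> 5 = 4, bit &= 0x1f = 26, so bit 26 of mc_filter[4] is
 * set and later written to MC_HASH register 4.
 */
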
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct bnx2x *bp = netdev_priv(dev);

        if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        if (netif_running(dev)) {
                if (CHIP_IS_E1(bp))
                        bnx2x_set_mac_addr_e1(bp, 1);
                else
                        bnx2x_set_mac_addr_e1h(bp, 1);
        }

        return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2x *bp = netdev_priv(dev);
        int port = BP_PORT(bp);
        int err;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->port.phy_addr;

                /* fallthrough */

        case SIOCGMIIREG: {
                u16 mii_regval;

                if (!netif_running(dev))
                        return -EAGAIN;

                mutex_lock(&bp->port.phy_mutex);
                err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
                                      DEFAULT_PHY_DEV_ADDR,
                                      (data->reg_num & 0x1f), &mii_regval);
                data->val_out = mii_regval;
                mutex_unlock(&bp->port.phy_mutex);
                return err;
        }

        case SIOCSMIIREG:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (!netif_running(dev))
                        return -EAGAIN;

                mutex_lock(&bp->port.phy_mutex);
                err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
                                       DEFAULT_PHY_DEV_ADDR,
                                       (data->reg_num & 0x1f), data->val_in);
                mutex_unlock(&bp->port.phy_mutex);
                return err;

        default:
                /* do nothing */
                break;
        }

        return -EOPNOTSUPP;
}

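/* Usage sketch (userspace side, illustrative only): the MII ioctls above
 * are reached through an AF_INET socket, e.g.
 *
 *      struct ifreq ifr = {0};
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);       // fills mii->phy_id
 *      mii->reg_num = 1;                   // MII status register
 *      ioctl(fd, SIOCGMIIREG, &ifr);       // value lands in mii->val_out
 *
 * Note this driver honors only the low 5 bits of reg_num.
 */
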
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
            ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
                return -EINVAL;

        /* This does not race with packet allocation
         * because the actual alloc size is
         * only updated as part of load
         */
        dev->mtu = new_mtu;

        if (netif_running(dev)) {
                bnx2x_nic_unload(bp, UNLOAD_NORMAL);
                rc = bnx2x_nic_load(bp, LOAD_NORMAL);
        }

        return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
        if (!bp->panic)
                bnx2x_panic();
#endif
        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
                                   struct vlan_group *vlgrp)
{
        struct bnx2x *bp = netdev_priv(dev);

        bp->vlgrp = vlgrp;

        /* Set flags according to the required capabilities */
        bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

        if (dev->features & NETIF_F_HW_VLAN_TX)
                bp->flags |= HW_VLAN_TX_FLAG;

        if (dev->features & NETIF_F_HW_VLAN_RX)
                bp->flags |= HW_VLAN_RX_FLAG;

        if (netif_running(dev))
                bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2x_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
}
#endif

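/* Note: ndo_poll_controller is the netpoll hook, used by clients such as
 * netconsole to move packets with interrupts disabled; the handler above
 * simply masks the IRQ and runs the regular interrupt routine
 * synchronously.
 */
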
static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
        .ndo_start_xmit         = bnx2x_start_xmit,
        .ndo_set_multicast_list = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
        .ndo_change_mtu         = bnx2x_change_mtu,
        .ndo_tx_timeout         = bnx2x_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        .ndo_poll_controller    = poll_bnx2x,
#endif
};


static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
                                    struct net_device *dev)
{
        struct bnx2x *bp;
        int rc;

        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);

        bp->dev = dev;
        bp->pdev = pdev;
        bp->flags = 0;
        bp->func = PCI_FUNC(pdev->devfn);

        rc = pci_enable_device(pdev);
        if (rc) {
                printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
                goto err_out;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find PCI device base address,"
                       " aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find second PCI device"
                       " base address, aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, DRV_MODULE_NAME);
                if (rc) {
                        printk(KERN_ERR PFX "Cannot obtain PCI resources,"
                               " aborting\n");
                        goto err_out_disable;
                }

                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (bp->pm_cap == 0) {
                printk(KERN_ERR PFX "Cannot find power management"
                       " capability, aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (bp->pcie_cap == 0) {
                printk(KERN_ERR PFX "Cannot find PCI Express capability,"
                       " aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
                        printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
                               " failed, aborting\n");
                        rc = -EIO;
                        goto err_out_release;
                }

        } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
                printk(KERN_ERR PFX "System does not support DMA,"
                       " aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        dev->mem_start = pci_resource_start(pdev, 0);
        dev->base_addr = dev->mem_start;
        dev->mem_end = pci_resource_end(pdev, 0);

        dev->irq = pdev->irq;

        bp->regview = pci_ioremap_bar(pdev, 0);
        if (!bp->regview) {
                printk(KERN_ERR PFX "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err_out_release;
        }

        bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
                                        min_t(u64, BNX2X_DB_SIZE,
                                              pci_resource_len(pdev, 2)));
        if (!bp->doorbells) {
                printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
                rc = -ENOMEM;
                goto err_out_unmap;
        }

        bnx2x_set_power_state(bp, PCI_D0);

        /* clean indirect addresses */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
        REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

        dev->watchdog_timeo = TX_TIMEOUT;

        dev->netdev_ops = &bnx2x_netdev_ops;
        dev->ethtool_ops = &bnx2x_ethtool_ops;
        dev->features |= NETIF_F_SG;
        dev->features |= NETIF_F_HW_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
        dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
        bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
        dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->features |= NETIF_F_TSO6;

        return 0;

err_out_unmap:
        if (bp->regview) {
                iounmap(bp->regview);
                bp->regview = NULL;
        }
        if (bp->doorbells) {
                iounmap(bp->doorbells);
                bp->doorbells = NULL;
        }

err_out_release:
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

err_out_disable:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

err_out:
        return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

        val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
        return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

        val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
        return val;
}

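/* Note on the two helpers above: both read the PCIe link control/status
 * word through the chip's PCICFG window and extract a bit-field, e.g.
 * width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT gives the
 * negotiated lane count, while the speed field encodes 1 for a 2.5GT/s
 * (Gen1) link and 2 for 5GT/s (Gen2); both values feed the probe banner
 * printed below.
 */
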
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        static int version_printed;
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int rc;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                printk(KERN_ERR PFX "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msglevel = debug;

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               bnx2x_get_pcie_width(bp),
               (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

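/* Note on the PCI error-recovery (EEH) flow above: on a detected bus
 * error the PCI core calls .error_detected (the driver detaches and
 * requests a slot reset), then .slot_reset once the link has been reset
 * (re-enable the device and restore its PCI state), and finally
 * .resume, where the driver re-reads the MCP shared memory and reloads
 * the NIC if it was running when the error hit.
 */
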
static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);